In [ ]:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.tsa.seasonal import seasonal_decompose
import math
import random

from sklearn.preprocessing import MinMaxScaler
from keras.preprocessing.sequence import TimeseriesGenerator
from keras.models import Sequential
from keras.layers import RNN, SimpleRNN, LSTM, GRU, BatchNormalization, Dense
from sklearn.metrics import mean_squared_error
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

random.seed(2)
np.random.seed(2)
tf.random.set_seed(2)
2024-09-09 18:00:07.495842: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-09 18:00:07.620245: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory
2024-09-09 18:00:07.620276: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
2024-09-09 18:00:07.655198: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-09 18:00:08.370338: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory
2024-09-09 18:00:08.370422: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory
2024-09-09 18:00:08.370430: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
In [ ]:
daily_gold = pd.read_csv("/work/Daily/Commodities/Daily_XAU_USD_01-12-2014_01-12-2024.csv")
daily_oil = pd.read_csv("/work/Daily/Commodities/Daily_Crude Oil_01-12-2014_01-12-2024.csv")

daily_barrick = pd.read_csv("/work/Daily/Stock/Daily_BARRICK_01-12-2014_01-12-2024.csv")
daily_newmont = pd.read_csv("/work/Daily/Stock/Daily_NEWMONT_01-12-2014_01-12-2024.csv")
daily_chevron = pd.read_csv("/work/Daily/Stock/Daily_CHEVRON_01-12-2014_01-12-2024.csv")
daily_exxon = pd.read_csv("/work/Daily/Stock/Daily_EXXON_01-12-2014_01-12-2024.csv")

weekly_gold = pd.read_csv("/work/Weekly/Commodities/Weekly_XAU_USD_01-12-2014_01-12-2024.csv")
weekly_oil = pd.read_csv("/work/Weekly/Commodities/Weekly_Crude Oil_01-12-2014_01-12-2024.csv")

weekly_barrick = pd.read_csv("/work/Weekly/Stock/Weekly_BARRICK_01-12-2014_01-12-2024.csv")
weekly_newmont = pd.read_csv("/work/Weekly/Stock/Weekly_NEWMONT_01-12-2014_01-12-2024.csv")
weekly_chevron = pd.read_csv("/work/Weekly/Stock/Weekly_CHEVRON_01-12-2014_01-12-2024.csv")
weekly_exxon = pd.read_csv("/work/Weekly/Stock/Weekly_EXXON_01-12-2014_01-12-2024.csv")

daily_gold = daily_gold.drop("Vol.", axis=1)
daily_oil = daily_oil.drop("Vol.", axis=1)

daily_barrick = daily_barrick.drop("Vol.", axis=1)
daily_newmont = daily_newmont.drop("Vol.", axis=1)
daily_chevron = daily_chevron.drop("Vol.", axis=1)
daily_exxon = daily_exxon.drop("Vol.", axis=1)

weekly_gold = weekly_gold.drop("Vol.", axis=1)
weekly_oil = weekly_oil.drop("Vol.", axis=1)

weekly_barrick = weekly_barrick.drop("Vol.", axis=1)
weekly_newmont = weekly_newmont.drop("Vol.", axis=1)
weekly_chevron = weekly_chevron.drop("Vol.", axis=1)
weekly_exxon = weekly_exxon.drop("Vol.", axis=1)

daily_gold.iloc[:, 1:] = daily_gold.iloc[:, 1:].replace({',': '', '%': ''}, regex=True).astype(float)
daily_oil.iloc[:, 1:] = daily_oil.iloc[:, 1:].replace({',': '', '%': ''}, regex=True).astype(float)

daily_barrick.iloc[:, 1:] = daily_barrick.iloc[:, 1:].replace({',': '', '%': ''}, regex=True).astype(float)
daily_newmont.iloc[:, 1:] = daily_newmont.iloc[:, 1:].replace({',': '', '%': ''}, regex=True).astype(float)
daily_chevron.iloc[:, 1:] = daily_chevron.iloc[:, 1:].replace({',': '', '%': ''}, regex=True).astype(float)
daily_exxon.iloc[:, 1:] = daily_exxon.iloc[:, 1:].replace({',': '', '%': ''}, regex=True).astype(float)

weekly_gold.iloc[:, 1:] = weekly_barrick.iloc[:, 1:].replace({',': '', '%': '', 'M':''}, regex=True).astype(float)
weekly_oil.iloc[:, 1:] = weekly_newmont.iloc[:, 1:].replace({',': '', '%': '', 'M':''}, regex=True).astype(float)

weekly_barrick.iloc[:, 1:] = weekly_barrick.iloc[:, 1:].replace({',': '', '%': '', 'M':''}, regex=True).astype(float)
weekly_newmont.iloc[:, 1:] = weekly_newmont.iloc[:, 1:].replace({',': '', '%': '', 'M':''}, regex=True).astype(float)
weekly_chevron.iloc[:, 1:] = weekly_chevron.iloc[:, 1:].replace({',': '', '%': '', 'M':''}, regex=True).astype(float)
weekly_exxon.iloc[:, 1:] = weekly_exxon.iloc[:, 1:].replace({',': '', '%': '', 'M':''}, regex=True).astype(float)

daily_gold.iloc[:, -1] *= 0.01
daily_oil.iloc[:, -1] *= 0.01
daily_barrick.iloc[:, -1] *= 0.01
daily_newmont.iloc[:, -1] *= 0.01
daily_chevron.iloc[:, -1] *= 0.01
daily_exxon.iloc[:, -1] *= 0.01

weekly_gold.iloc[:, -1] *= 0.01
weekly_oil.iloc[:, -1] *= 0.01
weekly_barrick.iloc[:, -1] *= 0.01
weekly_newmont.iloc[:, -1] *= 0.01
weekly_chevron.iloc[:, -1] *= 0.01
weekly_exxon.iloc[:, -1] *= 0.01

daily_gold['Date'] = pd.to_datetime(daily_gold['Date'], format='%m/%d/%Y', errors='coerce')
daily_oil['Date'] = pd.to_datetime(daily_oil['Date'], format='%m/%d/%Y', errors='coerce')

daily_barrick['Date'] = pd.to_datetime(daily_barrick['Date'], format='%m/%d/%Y', errors='coerce')
daily_newmont['Date'] = pd.to_datetime(daily_newmont['Date'], format='%m/%d/%Y', errors='coerce')
daily_chevron['Date'] = pd.to_datetime(daily_chevron['Date'], format='%m/%d/%Y', errors='coerce')
daily_exxon['Date'] = pd.to_datetime(daily_exxon['Date'], format='%m/%d/%Y', errors='coerce')

weekly_gold['Date'] = pd.to_datetime(weekly_gold['Date'], format='%m/%d/%Y', errors='coerce')
weekly_oil['Date'] = pd.to_datetime(weekly_oil['Date'], format='%m/%d/%Y', errors='coerce')

weekly_barrick['Date'] = pd.to_datetime(weekly_barrick['Date'], format='%m/%d/%Y', errors='coerce')
weekly_newmont['Date'] = pd.to_datetime(weekly_newmont['Date'], format='%m/%d/%Y', errors='coerce')
weekly_chevron['Date'] = pd.to_datetime(weekly_chevron['Date'], format='%m/%d/%Y', errors='coerce')
weekly_exxon['Date'] = pd.to_datetime(weekly_exxon['Date'], format='%m/%d/%Y', errors='coerce')

daily_gold.set_index('Date', inplace=True)
daily_oil.set_index('Date', inplace=True)

daily_barrick.set_index('Date', inplace=True)
daily_newmont.set_index('Date', inplace=True)
daily_chevron.set_index('Date', inplace=True)
daily_exxon.set_index('Date', inplace=True)

weekly_gold.set_index('Date', inplace=True)
weekly_oil.set_index('Date', inplace=True)

weekly_barrick.set_index('Date', inplace=True)
weekly_newmont.set_index('Date', inplace=True)
weekly_chevron.set_index('Date', inplace=True)
weekly_exxon.set_index('Date', inplace=True)
In [ ]:
master_daily = daily_gold.add_suffix('_gold')

dfs = [daily_oil, daily_barrick, daily_newmont, daily_exxon, daily_chevron]

suffixes = ['_oil', '_barrick', '_newmont', '_exxon', '_chevron']

suffix_dfs = []

for i in range(len(dfs)):
    suffix_dfs.append(dfs[i].add_suffix(suffixes[i]))

for i in range(len(dfs)):
    master_daily = master_daily.join(suffix_dfs[i])

master_daily = master_daily.dropna()

master_daily
Out[ ]:
Price_gold Open_gold High_gold Low_gold Change %_gold Price_oil Open_oil High_oil Low_oil Change %_oil ... Price_exxon Open_exxon High_exxon Low_exxon Change %_exxon Price_chevron Open_chevron High_chevron Low_chevron Change %_chevron
Date
2024-01-11 2028.09 2023.74 2039.69 2013.32 0.0023 72.02 71.33 73.81 71.17 0.0091 ... 98.67 99.04 99.50 98.56 -0.0002 145.28 145.56 146.17 144.62 0.0054
2024-01-10 2023.4 2029.94 2040.44 2020.45 -0.0031 71.37 72.17 73.59 71.01 -0.012 ... 98.69 99.80 99.80 98.16 -0.0098 144.50 146.02 146.02 144.11 -0.0082
2024-01-09 2029.59 2028.4 2042.09 2026.11 0.0009 72.24 70.91 72.93 70.47 0.0208 ... 99.67 101.29 101.29 99.19 -0.0124 145.70 149.95 149.95 145.59 -0.0254
2024-01-08 2027.84 2044.08 2046.71 2016.84 -0.0086 70.77 73.51 73.95 70.13 -0.0412 ... 100.92 100.73 101.04 98.90 -0.0167 149.50 148.42 149.70 146.58 -0.006
2024-01-05 2045.5 2043.69 2064.03 2024.49 0.0011 73.81 72.40 74.24 72.21 0.0224 ... 102.63 103.17 103.40 102.13 0.003 150.40 151.98 152.03 149.83 -0.0017
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
2014-01-17 1253.82 1242.53 1255.28 1238.05 0.0091 94.37 94.17 94.94 93.94 0.0044 ... 99.16 99.18 100.34 98.67 0.0022 119.29 119.03 120.38 118.74 0.0039
2014-01-16 1242.55 1241.49 1245.26 1236.55 0.0007 93.96 94.29 94.64 93.60 -0.0022 ... 98.94 98.79 99.04 98.25 0.0016 118.83 119.11 119.29 118.19 -0.0029
2014-01-15 1241.74 1245.1 1245.39 1234.49 -0.0027 94.17 92.65 94.64 92.43 0.0171 ... 98.78 99.16 99.35 98.63 -0.0034 119.18 119.69 120.07 119.05 -0.0033
2014-01-14 1245.1 1253.12 1255.38 1241.83 -0.0067 92.59 91.51 92.88 91.50 0.0086 ... 99.12 98.80 99.27 98.65 0.0058 119.57 119.40 119.75 118.80 0.0027
2014-01-13 1253.44 1248.28 1255.18 1243.72 0.0051 91.80 92.83 92.88 91.43 -0.0099 ... 98.55 99.91 100.10 98.27 -0.0196 119.25 120.81 120.90 119.01 -0.0145

2517 rows × 30 columns

EDA¶

Gold & Stocks Related¶

In [ ]:
#Gold

result_daily_gold = seasonal_decompose(daily_gold['Price'], model='additive', period=365)

plt.figure(figsize=(12, 8))
plt.subplot(4, 1, 1)
plt.plot(daily_gold.index, daily_gold['Price'], label='Original Time Series')
plt.legend()

plt.subplot(4, 1, 2)
plt.plot(daily_gold.index, result_daily_gold.trend, label='Trend')
plt.legend()

plt.subplot(4, 1, 3)
plt.plot(daily_gold.index, result_daily_gold.seasonal, label='Seasonal')
plt.legend()

plt.subplot(4, 1, 4)
plt.plot(daily_gold.index, result_daily_gold.resid, label='Residuals')
plt.legend()

plt.tight_layout()
plt.show()
No description has been provided for this image
In [ ]:
#Barrick

result_daily_barrick = seasonal_decompose(daily_barrick['Price'], model='additive', period=365)

plt.figure(figsize=(12, 8))
plt.subplot(4, 1, 1)
plt.plot(daily_barrick.index, daily_barrick['Price'], label='Original Time Series')
plt.legend()

plt.subplot(4, 1, 2)
plt.plot(daily_barrick.index, result_daily_barrick.trend, label='Trend')
plt.legend()

plt.subplot(4, 1, 3)
plt.plot(daily_barrick.index, result_daily_barrick.seasonal, label='Seasonal')
plt.legend()

plt.subplot(4, 1, 4)
plt.plot(daily_barrick.index, result_daily_barrick.resid, label='Residuals')
plt.legend()

plt.tight_layout()
plt.show()
No description has been provided for this image
In [ ]:
result_daily_newmont = seasonal_decompose(daily_newmont['Price'], model='additive', period=365)

plt.figure(figsize=(12, 8))
plt.subplot(4, 1, 1)
plt.plot(daily_newmont.index, daily_newmont['Price'], label='Original Time Series')
plt.legend()

plt.subplot(4, 1, 2)
plt.plot(daily_newmont.index, result_daily_newmont.trend, label='Trend')
plt.legend()

plt.subplot(4, 1, 3)
plt.plot(daily_newmont.index, result_daily_newmont.seasonal, label='Seasonal')
plt.legend()

plt.subplot(4, 1, 4)
plt.plot(daily_newmont.index, result_daily_newmont.resid, label='Residuals')
plt.legend()

plt.tight_layout()
plt.show()
No description has been provided for this image

Oil & Related Stocks¶

Trend untuk harga saham chevron dan exxon mengikuti trend harga minyak sejak 2016. Namun, khususnya untuk harga saham exxon, sebelum tahun 2016 trendnya tidak mirip dengan trend harga minyak. Sselain itu, secara visual, seasonality dari harga saham exxon sangat mirip dengan harga sama chevron.

In [ ]:
result_daily_oil = seasonal_decompose(daily_oil['Price'], model='additive', period=365)

plt.figure(figsize=(12, 8))
plt.subplot(4, 1, 1)
plt.plot(daily_oil.index, daily_oil['Price'], label='Original Time Series')
plt.legend()

plt.subplot(4, 1, 2)
plt.plot(daily_oil.index, result_daily_oil.trend, label='Trend')
plt.legend()

plt.subplot(4, 1, 3)
plt.plot(daily_oil.index, result_daily_oil.seasonal, label='Seasonal')
plt.legend()

plt.subplot(4, 1, 4)
plt.plot(daily_oil.index, result_daily_oil.resid, label='Residuals')
plt.legend()

plt.tight_layout()
plt.show()
No description has been provided for this image
In [ ]:
result_daily_chevron = seasonal_decompose(daily_chevron['Price'], model='additive', period=365)

plt.figure(figsize=(12, 8))
plt.subplot(4, 1, 1)
plt.plot(daily_chevron.index, daily_chevron['Price'], label='Original Time Series')
plt.legend()

plt.subplot(4, 1, 2)
plt.plot(daily_chevron.index, result_daily_chevron.trend, label='Trend')
plt.legend()

plt.subplot(4, 1, 3)
plt.plot(daily_chevron.index, result_daily_chevron.seasonal, label='Seasonal')
plt.legend()

plt.subplot(4, 1, 4)
plt.plot(daily_chevron.index, result_daily_chevron.resid, label='Residuals')
plt.legend()

plt.tight_layout()
plt.show()
No description has been provided for this image
In [ ]:
result_daily_exxon = seasonal_decompose(daily_exxon['Price'], model='additive', period=365)

plt.figure(figsize=(12, 8))
plt.subplot(4, 1, 1)
plt.plot(daily_exxon.index, daily_exxon['Price'], label='Original Time Series')
plt.legend()

plt.subplot(4, 1, 2)
plt.plot(daily_exxon.index, result_daily_exxon.trend, label='Trend')
plt.legend()

plt.subplot(4, 1, 3)
plt.plot(daily_exxon.index, result_daily_exxon.seasonal, label='Seasonal')
plt.legend()

plt.subplot(4, 1, 4)
plt.plot(daily_exxon.index, result_daily_exxon.resid, label='Residuals')
plt.legend()

plt.tight_layout()
plt.show()
No description has been provided for this image

Autocorrelation¶

In [ ]:
autocorr_dailybarrick = pd.DataFrame()
autocorr_dailybarrick["Price"] = daily_barrick["Price"]
autocorr_dailybarrick["Price_Lag_1"] = autocorr_dailybarrick["Price"].shift(1)
autocorr_dailybarrick["Price_Lag_2"] = autocorr_dailybarrick["Price"].shift(2)
autocorr_dailybarrick["Price_Lag_3"] = autocorr_dailybarrick["Price"].shift(3)
In [ ]:
mean_nolag = autocorr_dailybarrick["Price"].mean()
# Sum of Squared Deviations
ssd_nolag = ((autocorr_dailybarrick["Price"] - mean_nolag) ** 2).sum()
autocorr_dailybarrick["wadits"] = autocorr_dailybarrick["Price_Lag_3"] - mean_nolag
autocorr = (autocorr_dailybarrick['Price'] * autocorr_dailybarrick['wadits']).sum() / ssd_nolag
autocorr
Out[ ]:
0.9866671797654983
In [ ]:
acorr = sm.tsa.acf(autocorr_dailybarrick["Price"], nlags = 5)
acorr
Out[ ]:
array([1.        , 0.99587858, 0.99175007, 0.98776114, 0.98360519,
       0.97943065])
In [ ]:
plot_acf(autocorr_dailybarrick["Price"])
Out[ ]:
No description has been provided for this image
No description has been provided for this image
In [ ]:
plot_acf(autocorr_dailybarrick["Price"], lags = 3)
Out[ ]:
No description has been provided for this image
No description has been provided for this image
0.98 AUTOCORRELATION IS VERY STRONG RELATIONSHIP¶

Cross Correlation¶

In [ ]:
from scipy.signal import correlate

# Compute correlations
gold_barrick_correlation = correlate(daily_gold['Price'], daily_barrick['Price'], mode='full')
gold_newmont_correlation = correlate(daily_gold['Price'], daily_newmont['Price'], mode='full')
oil_chevron_correlation = correlate(daily_oil['Price'], daily_chevron['Price'], mode='full')
oil_exxon_correlation = correlate(daily_oil['Price'], daily_exxon['Price'], mode='full')

# Compute lags
lags_gold_barrick = np.arange(-len(daily_gold['Price']) + 1, len(daily_barrick['Price']))
lags_gold_newmont = np.arange(-len(daily_gold['Price']) + 1, len(daily_newmont['Price']))
lags_oil_chevron = np.arange(-len(daily_oil['Price']) + 1, len(daily_chevron['Price']))
lags_oil_exxon = np.arange(-len(daily_oil['Price']) + 1, len(daily_exxon['Price']))

# Create subplots
fig, axs = plt.subplots(2, 2)

# Plot each correlation on its respective subplot
axs[0, 0].plot(lags_gold_barrick, gold_barrick_correlation)
axs[0, 0].set_title('Gold-Barrick Correlation')
axs[0, 0].set_xlabel('Lag')
axs[0, 0].set_ylabel('Cross-Correlation')
axs[0, 0].grid(True)

axs[0, 1].plot(lags_gold_newmont, gold_newmont_correlation)
axs[0, 1].set_title('Gold-Newmont Correlation')
axs[0, 1].set_xlabel('Lag')
axs[0, 1].set_ylabel('Cross-Correlation')
axs[0, 1].grid(True)

axs[1, 0].plot(lags_oil_chevron, oil_chevron_correlation)
axs[1, 0].set_title('Oil-Chevron Correlation')
axs[1, 0].set_xlabel('Lag')
axs[1, 0].set_ylabel('Cross-Correlation')
axs[1, 0].grid(True)

axs[1, 1].plot(lags_oil_exxon, oil_exxon_correlation)
axs[1, 1].set_title('Oil-Exxon Correlation')
axs[1, 1].set_xlabel('Lag')
axs[1, 1].set_ylabel('Cross-Correlation')
axs[1, 1].grid(True)

# Adjust layout to prevent overlap
plt.tight_layout()

# Show the plot
plt.show()
No description has been provided for this image
In [ ]:
from scipy.signal import correlate

gold_barrick_correlation = correlate(daily_gold['Price'], daily_barrick['Price'], mode='full')

lags_gold_barrick = np.arange(-len(daily_gold['Price']) + 1, len(daily_barrick['Price']))
plt.plot(lags_gold_barrick, gold_barrick_correlation)
plt.xlabel('Lag')
plt.ylabel('Cross-Correlation')
plt.title('Cross-Correlation Function of Gold Prices and Barrick')
plt.grid(True)
plt.show()
No description has been provided for this image
In [ ]:
gold_newmont_correlation = correlate(daily_gold['Price'], daily_newmont['Price'], mode='full')

lags_gold_newmont = np.arange(-len(daily_gold['Price']) + 1, len(daily_newmont['Price']))
plt.plot(lags_gold_newmont, gold_newmont_correlation)
plt.xlabel('Lag')
plt.ylabel('Cross-Correlation')
plt.title('Cross-Correlation Function of Gold Price and Newmont Stock Price')
plt.grid(True)
plt.show()
No description has been provided for this image
In [ ]:
oil_chevron_correlation = correlate(daily_oil['Price'], daily_chevron['Price'], mode='full')

lags_oil_chevron = np.arange(-len(daily_oil['Price']) + 1, len(daily_chevron['Price']))
plt.plot(lags_oil_chevron, oil_chevron_correlation)
plt.xlabel('Lag')
plt.ylabel('Cross-Correlation')
plt.title('Cross-Correlation Function of Oil Price and Chevron')
plt.grid(True)
plt.show()
No description has been provided for this image
In [ ]:
oil_exxon_correlation = correlate(daily_oil['Price'], daily_exxon['Price'], mode='full')

lags_oil_exxon = np.arange(-len(daily_oil['Price']) + 1, len(daily_exxon['Price']))
plt.plot(lags_oil_exxon, oil_exxon_correlation)
plt.xlabel('Lag')
plt.ylabel('Cross-Correlation')
plt.title('Cross-Correlation Function of Oil Prices and Exxon')
plt.grid(True)
plt.show()
No description has been provided for this image

Seasonality¶

In [ ]:
daily_barrick
Out[ ]:
Price Open High Low Change %
Date
2024-01-11 17.15 17.38 17.49 16.94 -0.011
2024-01-10 17.34 17.44 17.52 17.20 -0.0034
2024-01-09 17.40 17.69 17.70 17.37 -0.0164
2024-01-08 17.69 17.39 17.92 17.31 0.0114
2024-01-05 17.49 17.44 17.72 17.26 0.0081
... ... ... ... ... ...
2014-01-17 18.66 18.34 18.79 18.34 0.0304
2014-01-16 18.11 18.09 18.20 17.85 0.0095
2014-01-15 17.94 17.54 18.03 17.48 0.0136
2014-01-14 17.70 17.99 18.16 17.59 -0.0205
2014-01-13 18.07 18.03 18.10 17.59 -0.0006

2517 rows × 5 columns

Time-Series Aggregation¶

In [ ]:
daily_gold_gren = pd.read_csv("/work/Daily/Commodities/Daily_XAU_USD_01-12-2014_01-12-2024.csv").drop(["Vol.", "Open", "High", "Low", "Change %"], axis = 1)
daily_gold_gren2 = pd.read_csv("/work/Daily/Commodities/Daily_XAU_USD_01-12-2014_01-12-2024.csv").drop(["Vol.", "Open", "High", "Low", "Change %"], axis = 1)
daily_oil_gren = pd.read_csv("/work/Daily/Commodities/Daily_XBR_USD_01-12-2014_01-12-2024.csv").drop(["Vol.", "Open", "High", "Low", "Change %"], axis = 1)
daily_oil_gren2 = pd.read_csv("/work/Daily/Commodities/Daily_XBR_USD_01-12-2014_01-12-2024.csv").drop(["Vol.", "Open", "High", "Low", "Change %"], axis = 1)
daily_barrick_gren = pd.read_csv("/work/Daily/Stock/Daily_BARRICK_01-12-2014_01-12-2024.csv").drop(["Vol.", "Open", "High", "Low", "Change %"], axis = 1)
daily_barrick_gren2 = pd.read_csv("/work/Daily/Stock/Daily_BARRICK_01-12-2014_01-12-2024.csv").drop(["Vol.", "Open", "High", "Low", "Change %"], axis = 1)
daily_newmont_gren = pd.read_csv("/work/Daily/Stock/Daily_NEWMONT_01-12-2014_01-12-2024.csv").drop(["Vol.", "Open", "High", "Low", "Change %"], axis = 1)
daily_newmont_gren2 = pd.read_csv("/work/Daily/Stock/Daily_NEWMONT_01-12-2014_01-12-2024.csv").drop(["Vol.", "Open", "High", "Low", "Change %"], axis = 1)
daily_chevron_gren = pd.read_csv("/work/Daily/Stock/Daily_CHEVRON_01-12-2014_01-12-2024.csv").drop(["Vol.", "Open", "High", "Low", "Change %"], axis = 1)
daily_chevron_gren2 = pd.read_csv("/work/Daily/Stock/Daily_CHEVRON_01-12-2014_01-12-2024.csv").drop(["Vol.", "Open", "High", "Low", "Change %"], axis = 1)
daily_exxon_gren = pd.read_csv("/work/Daily/Stock/Daily_EXXON_01-12-2014_01-12-2024.csv").drop(["Vol.", "Open", "High", "Low", "Change %"], axis = 1)
daily_exxon_gren2 = pd.read_csv("/work/Daily/Stock/Daily_EXXON_01-12-2014_01-12-2024.csv").drop(["Vol.", "Open", "High", "Low", "Change %"], axis = 1)
In [ ]:
daily_gold_gren['Date'] = pd.to_datetime(daily_gold_gren['Date'])
daily_gold_gren2['Date'] = pd.to_datetime(daily_gold_gren2['Date'])
daily_gold_gren.set_index('Date', inplace=True)
daily_gold_gren2.set_index('Date', inplace=True)

daily_oil_gren['Date'] = pd.to_datetime(daily_oil_gren['Date'])
daily_oil_gren2['Date'] = pd.to_datetime(daily_oil_gren2['Date'])
daily_oil_gren.set_index('Date', inplace=True)
daily_oil_gren2.set_index('Date', inplace=True)

daily_gold_gren['Price'] = pd.to_numeric(daily_gold_gren['Price'].str.replace(',', ''), errors ='coerce')
daily_gold_gren2['Price'] = pd.to_numeric(daily_gold_gren2['Price'].str.replace(',', ''), errors='coerce')
if daily_oil_gren['Price'].dtype == 'O':
    daily_oil_gren['Price'] = pd.to_numeric(daily_oil_gren['Price'].str.replace(',', ''), errors='coerce')
if daily_oil_gren2['Price'].dtype == 'O':
    daily_oil_gren2['Price'] = pd.to_numeric(daily_oil_gren2['Price'].str.replace(',', ''), errors='coerce')

weekly_gold_gren = daily_gold_gren['Price'].resample('W').mean().to_frame(name='Price')
monthly_gold_gren = daily_gold_gren2['Price'].resample('M').mean().to_frame(name='Price')
weekly_oil_gren = daily_oil_gren['Price'].resample('W').mean().to_frame(name='Price')
monthly_oil_gren = daily_oil_gren2['Price'].resample('M').mean().to_frame(name='Price')
In [ ]:
daily_barrick_gren['Date'] = pd.to_datetime(daily_barrick_gren['Date'])
daily_barrick_gren2['Date'] = pd.to_datetime(daily_barrick_gren2['Date'])
daily_barrick_gren.set_index('Date', inplace=True)
daily_barrick_gren2.set_index('Date', inplace=True)

daily_newmont_gren['Date'] = pd.to_datetime(daily_newmont_gren['Date'])
daily_newmont_gren2['Date'] = pd.to_datetime(daily_newmont_gren2['Date'])
daily_newmont_gren.set_index('Date', inplace=True)
daily_newmont_gren2.set_index('Date', inplace=True)

daily_chevron_gren['Date'] = pd.to_datetime(daily_chevron_gren['Date'])
daily_chevron_gren2['Date'] = pd.to_datetime(daily_chevron_gren2['Date'])
daily_chevron_gren.set_index('Date', inplace=True)
daily_chevron_gren2.set_index('Date', inplace=True)

daily_exxon_gren['Date'] = pd.to_datetime(daily_exxon_gren['Date'])
daily_exxon_gren2['Date'] = pd.to_datetime(daily_exxon_gren2['Date'])
daily_exxon_gren.set_index('Date', inplace=True)
daily_exxon_gren2.set_index('Date', inplace=True)

daily_barrick_gren['Price'] = pd.to_numeric(daily_barrick_gren['Price'])
daily_barrick_gren2['Price'] = pd.to_numeric(daily_barrick_gren2['Price'])
daily_newmont_gren['Price'] = pd.to_numeric(daily_newmont_gren['Price'])
daily_newmont_gren2['Price'] = pd.to_numeric(daily_newmont_gren2['Price'])
daily_chevron_gren['Price'] = pd.to_numeric(daily_chevron_gren['Price'])
daily_chevron_gren2['Price'] = pd.to_numeric(daily_chevron_gren2['Price'])
daily_exxon_gren['Price'] = pd.to_numeric(daily_exxon_gren['Price'])
daily_exxon_gren2['Price'] = pd.to_numeric(daily_exxon_gren2['Price'])

weekly_barrick_gren = daily_barrick_gren['Price'].resample('W').mean().to_frame(name='Price')
monthly_barrick_gren = daily_barrick_gren2['Price'].resample('M').mean().to_frame(name='Price')
weekly_newmont_gren = daily_newmont_gren['Price'].resample('W').mean().to_frame(name='Price')
monthly_newmont_gren = daily_newmont_gren2['Price'].resample('M').mean().to_frame(name='Price')
weekly_chevron_gren = daily_chevron_gren['Price'].resample('W').mean().to_frame(name='Price')
monthly_chevron_gren = daily_chevron_gren2['Price'].resample('M').mean().to_frame(name='Price')
weekly_exxon_gren = daily_exxon_gren['Price'].resample('W').mean().to_frame(name='Price')
monthly_exxon_gren = daily_exxon_gren2['Price'].resample('M').mean().to_frame(name='Price')
In [ ]:
plt.figure(figsize=(15, 10))

plt.subplot(2, 2, 1)
plt.plot(monthly_gold_gren.index, monthly_gold_gren['Price'], marker='x', linestyle='solid', color='r')
plt.title('Monthly Average Gold Prices')
plt.xlabel('Year')
plt.ylabel('Price')
plt.grid(True)

plt.subplot(2, 2, 2)
plt.plot(weekly_gold_gren.index, weekly_gold_gren['Price'], marker='x', linestyle='solid', color='r')
plt.title('Weekly Average Gold Prices')
plt.xlabel('Year')
plt.ylabel('Price')
plt.grid(True)

plt.subplot(2, 2, 3)
plt.plot(monthly_oil_gren.index, monthly_oil_gren['Price'], marker='x', linestyle='solid', color='b')
plt.title('Monthly Average Oil Prices')
plt.xlabel('Year')
plt.ylabel('Price')
plt.grid(True)

plt.subplot(2, 2, 4)
plt.plot(weekly_oil_gren.index, weekly_oil_gren['Price'], marker='x', linestyle='solid', color='b')
plt.title('Weekly Average Oil Prices')
plt.xlabel('Year')
plt.ylabel('Price')
plt.grid(True)

plt.tight_layout()
plt.show()
No description has been provided for this image
  • Gold prices are slightly more stable than oil prices. The graphs (especially monthly) shows that gold prices have a smoother trend and less volatility than oil prices. Another way to interpret this is that gold prices are less likely to be affected by external factors.
  • Gold prices are currently increasing by a significant amount since 2018, while oil prices have their peak recently at around 2022, they are currently decreasing.
In [ ]:
plt.figure(figsize=(15, 10))

plt.subplot(2, 2, 1)
plt.plot(monthly_barrick_gren.index, monthly_barrick_gren['Price'], marker='x', linestyle='solid', color='r')
plt.title('Monthly Average Barrick Stock Prices')
plt.xlabel('Year')
plt.ylabel('Price')
plt.grid(True)

plt.subplot(2, 2, 2)
plt.plot(weekly_barrick_gren.index, weekly_barrick_gren['Price'], marker='x', linestyle='solid', color='r')
plt.title('Weekly Average Barrick Stock Prices')
plt.xlabel('Year')
plt.ylabel('Price')
plt.grid(True)

plt.subplot(2, 2, 3)
plt.plot(monthly_newmont_gren.index, monthly_newmont_gren['Price'], marker='x', linestyle='solid', color='b')
plt.title('Monthly Average Newmont Stock Prices')
plt.xlabel('Year')
plt.ylabel('Price')
plt.grid(True)

plt.subplot(2, 2, 4)
plt.plot(weekly_newmont_gren.index, weekly_newmont_gren['Price'], marker='x', linestyle='solid', color='b')
plt.title('Weekly Average Newmont Stock Prices')
plt.xlabel('Yea')
plt.ylabel('Price')
plt.grid(True)

plt.tight_layout()
plt.show()
No description has been provided for this image

The graphs indicate that both company stock prices have experienced fluctuations over the years, with some periods of growth and decline. These two stock prices tend to move in the same direction, suggesting that they are both influenced by the same factors (example: gold prices, production costs, industry competition)

In [ ]:
plt.figure(figsize=(15, 10))

plt.subplot(2, 2, 1)
plt.plot(monthly_chevron_gren.index, monthly_chevron_gren['Price'], marker='x', linestyle='solid', color='r')
plt.title('Monthly Average Chevron Stock Prices')
plt.xlabel('Year')
plt.ylabel('Price')
plt.grid(True)

plt.subplot(2, 2, 2)
plt.plot(weekly_chevron_gren.index, weekly_chevron_gren['Price'], marker='x', linestyle='solid', color='r')
plt.title('Weekly Average Chevron Stock Prices')
plt.xlabel('Year')
plt.ylabel('Price')
plt.grid(True)

plt.subplot(2, 2, 3)
plt.plot(monthly_exxon_gren.index, monthly_exxon_gren['Price'], marker='x', linestyle='solid', color='b')
plt.title('Monthly Average Exxon Stock Prices')
plt.xlabel('Year')
plt.ylabel('Price')
plt.grid(True)

plt.subplot(2, 2, 4)
plt.plot(weekly_exxon_gren.index, weekly_exxon_gren['Price'], marker='x', linestyle='solid', color='b')
plt.title('Weekly Average Exxon Stock Prices')
plt.xlabel('Yea')
plt.ylabel('Price')
plt.grid(True)

plt.tight_layout()
plt.show()
No description has been provided for this image

The fluctuations in stock prices of these oil-producing companies share a really strong resemblance. During each period of time, the trend is closely related. Whether the price is peaking or bottoming out.

RNN¶

Gold¶

In [ ]:
#Gold

# Define your features (dependent variables) and target (independent variable)
features = ['Open', 'High', 'Low', 'Change %']
target = 'Price'

# Normalize the data
scaler = MinMaxScaler()
daily_gold_scaled = pd.DataFrame()
daily_gold_scaled[['Open', 'High', 'Low', 'Change %','Price']] = scaler.fit_transform(daily_gold[['Open', 'High', 'Low', 'Change %','Price']])
daily_gold_time = daily_gold.index
daily_gold_scaled.set_index(daily_gold_time,inplace = True)
daily_gold_scaled = daily_gold_scaled[::-1]

# Define the window size for the RNN
window_size = 5

# Function to create input data sequences and labels
def create_sequences(data, window_size):
    X = []
    y = []
    for i in range(len(data) - window_size):
        X.append(data[features][i:i+window_size].values)
        y.append([data[target][i+window_size]])
    return np.array(X), np.array(y)

# Create sequences and labels
X_gold, y_gold = create_sequences(daily_gold_scaled, window_size)

y_gold = pd.DataFrame(y_gold).set_index(daily_gold_scaled.index[window_size:len(daily_gold_scaled)])

# Split the data into training and testing sets
X_train_gold, X_test_gold, y_train_gold, y_test_gold = train_test_split(X_gold, y_gold, test_size=0.2, shuffle=False)

# Define the model
model_RNN_Gold = Sequential([
    SimpleRNN(128, activation = 'relu', return_sequences=True, input_shape=(window_size, len(features))),
    SimpleRNN(64, activation = 'relu', return_sequences=True),
    SimpleRNN(32, activation = 'relu', return_sequences=True),
    SimpleRNN(16, activation = 'relu'),
    Dense(1)
])

# Compile the model
model_RNN_Gold.compile(optimizer='adam', loss='mse')

# Train the model
model_RNN_Gold.fit(X_train_gold, y_train_gold, epochs=50, batch_size=32, validation_split=0.2)

# Evaluate the model
loss_RNN_Gold_train = model_RNN_Gold.evaluate(X_train_gold, y_train_gold)
loss_RNN_Gold_test = model_RNN_Gold.evaluate(X_test_gold, y_test_gold)
print("Train Loss:", loss_RNN_Gold_train)
print("Test Loss:", loss_RNN_Gold_test)

#Predict
pred_gold_train_RNN = model_RNN_Gold.predict(X_train_gold)
pred_gold_train_RNN = pred_gold_train_RNN.reshape(len(pred_gold_train_RNN))

pred_gold_test_RNN = model_RNN_Gold.predict(X_test_gold)
pred_gold_test_RNN = pred_gold_test_RNN.reshape(len(pred_gold_test_RNN))
In [ ]:
# Mini model
model_RNN_Gold_mini = Sequential([
    SimpleRNN(3, activation = 'relu', return_sequences=True, input_shape=(window_size, len(features))),
    SimpleRNN(2, activation = 'relu'),
    Dense(1)
])

# Compile the model
model_RNN_Gold_mini.compile(optimizer='adam', loss='mse')

# Train the model
model_RNN_Gold_mini.fit(X_train_gold, y_train_gold, epochs=50, batch_size=32, validation_split=0.2)

loss_RNN_Gold_mini_train = model_RNN_Gold_mini.evaluate(X_train_gold, y_train_gold)
loss_RNN_Gold_mini_test = model_RNN_Gold_mini.evaluate(X_test_gold, y_test_gold)
print("Train Loss:", loss_RNN_Gold_mini_train)
print("Test Loss:", loss_RNN_Gold_mini_test)

#Predict
pred_gold_train_RNN_mini = model_RNN_Gold_mini.predict(X_train_gold)
pred_gold_train_RNN_mini = pred_gold_train_RNN_mini.reshape(len(pred_gold_train_RNN_mini))

pred_gold_test_RNN_mini = model_RNN_Gold.predict(X_test_gold)
pred_gold_test_RNN_mini = pred_gold_test_RNN.reshape(len(pred_gold_test_RNN_mini))
Epoch 1/50
52/52 [==============================] - 1s 8ms/step - loss: 0.3074 - val_loss: 1.1423
Epoch 2/50
52/52 [==============================] - 0s 5ms/step - loss: 0.1034 - val_loss: 0.6857
Epoch 3/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0453 - val_loss: 0.4286
Epoch 4/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0308 - val_loss: 0.4032
Epoch 5/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0265 - val_loss: 0.3799
Epoch 6/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0232 - val_loss: 0.3591
Epoch 7/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0208 - val_loss: 0.3417
Epoch 8/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0192 - val_loss: 0.3266
Epoch 9/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0181 - val_loss: 0.3144
Epoch 10/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0174 - val_loss: 0.3053
Epoch 11/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0170 - val_loss: 0.2977
Epoch 12/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0167 - val_loss: 0.2916
Epoch 13/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0166 - val_loss: 0.2874
Epoch 14/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0165 - val_loss: 0.2844
Epoch 15/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0165 - val_loss: 0.2815
Epoch 16/50
52/52 [==============================] - 0s 5ms/step - loss: 0.0164 - val_loss: 0.2805
Epoch 17/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0164 - val_loss: 0.2794
Epoch 18/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0161 - val_loss: 0.2777
Epoch 19/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0138 - val_loss: 0.2720
Epoch 20/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0115 - val_loss: 0.2566
Epoch 21/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0106 - val_loss: 0.2436
Epoch 22/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0098 - val_loss: 0.2306
Epoch 23/50
52/52 [==============================] - 0s 5ms/step - loss: 0.0090 - val_loss: 0.2189
Epoch 24/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0084 - val_loss: 0.2075
Epoch 25/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0078 - val_loss: 0.1971
Epoch 26/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0072 - val_loss: 0.1876
Epoch 27/50
52/52 [==============================] - 0s 5ms/step - loss: 0.0067 - val_loss: 0.1787
Epoch 28/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0063 - val_loss: 0.1699
Epoch 29/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0059 - val_loss: 0.1623
Epoch 30/50
52/52 [==============================] - 0s 5ms/step - loss: 0.0055 - val_loss: 0.1543
Epoch 31/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0051 - val_loss: 0.1468
Epoch 32/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0048 - val_loss: 0.1404
Epoch 33/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0045 - val_loss: 0.1342
Epoch 34/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0042 - val_loss: 0.1283
Epoch 35/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0040 - val_loss: 0.1228
Epoch 36/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0038 - val_loss: 0.1181
Epoch 37/50
52/52 [==============================] - 0s 5ms/step - loss: 0.0036 - val_loss: 0.1131
Epoch 38/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0034 - val_loss: 0.1090
Epoch 39/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0033 - val_loss: 0.1053
Epoch 40/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0031 - val_loss: 0.1018
Epoch 41/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0030 - val_loss: 0.0986
Epoch 42/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0029 - val_loss: 0.0952
Epoch 43/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0028 - val_loss: 0.0924
Epoch 44/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0027 - val_loss: 0.0899
Epoch 45/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0026 - val_loss: 0.0872
Epoch 46/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0025 - val_loss: 0.0852
Epoch 47/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0025 - val_loss: 0.0830
Epoch 48/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0024 - val_loss: 0.0806
Epoch 49/50
52/52 [==============================] - 0s 4ms/step - loss: 0.0023 - val_loss: 0.0785
Epoch 50/50
52/52 [==============================] - 0s 5ms/step - loss: 0.0023 - val_loss: 0.0759
65/65 [==============================] - 0s 2ms/step - loss: 0.0169
17/17 [==============================] - 0s 2ms/step - loss: 0.1102
Train Loss: 0.01693212240934372
Test Loss: 0.11024937033653259
65/65 [==============================] - 0s 1ms/step
17/17 [==============================] - 0s 2ms/step
In [ ]:
def inverse_minmax_scaling(scaled_data, min_val, max_val):
    original_data = scaled_data * (max_val - min_val) + min_val
    return original_data

y_train_gold_true = pd.DataFrame(inverse_minmax_scaling(y_train_gold, min(daily_gold['Price']), max(daily_gold['Price'])))
y_test_gold_true = pd.DataFrame(inverse_minmax_scaling(y_test_gold, min(daily_gold['Price']), max(daily_gold['Price'])))
pred_gold_test_RNN = pd.DataFrame(inverse_minmax_scaling(pred_gold_test_RNN, min(daily_gold['Price']), max(daily_gold['Price'])))
pred_gold_train_RNN = pd.DataFrame(inverse_minmax_scaling(pred_gold_train_RNN, min(daily_gold['Price']), max(daily_gold['Price'])))

pred_gold_train_RNN.set_index(y_train_gold_true.index, inplace = True)
pred_gold_test_RNN.set_index(y_test_gold_true.index, inplace = True)
In [ ]:
pred_gold_test_RNN_mini = pd.DataFrame(inverse_minmax_scaling(pred_gold_test_RNN_mini, min(daily_gold['Price']), max(daily_gold['Price'])))
pred_gold_train_RNN_mini = pd.DataFrame(inverse_minmax_scaling(pred_gold_train_RNN_mini, min(daily_gold['Price']), max(daily_gold['Price'])))

pred_gold_train_RNN_mini.set_index(y_train_gold_true.index, inplace = True)
pred_gold_test_RNN_mini.set_index(y_test_gold_true.index, inplace = True)
In [ ]:
# Plotting the actual vs. predicted gold prices for training
plt.figure(figsize=(10, 6))
plt.plot(y_train_gold_true, label='Actual Price', color='blue')
plt.plot(pred_gold_train_RNN, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Gold Prices (Training Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_train_gold_true, pred_gold_train_RNN))
No description has been provided for this image
Root Mean Squared Error (RMSE): 215.1287447222792
In [ ]:
# Plotting the actual vs. predicted gold prices for testing
plt.figure(figsize=(10, 6))
plt.plot(y_test_gold_true, label='Actual Price', color='blue')
plt.plot(pred_gold_test_RNN, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Gold Prices (Testing Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_test_gold_true, pred_gold_test_RNN))
No description has been provided for this image
Root Mean Squared Error (RMSE): 713.7383548195609

Oil¶

In [ ]:
#daily_oil.loc['2020-04-20'] = [18.27, 20, 20.22, 17.31, 0]
In [ ]:
#Oil

# Normalize the data
daily_oil_scaled = pd.DataFrame()
daily_oil_scaled[['Open', 'High', 'Low', 'Change %','Price']] = scaler.fit_transform(daily_oil[['Open', 'High', 'Low', 'Change %','Price']])
daily_oil_time = daily_oil.index
daily_oil_scaled.set_index(daily_oil_time,inplace = True)
daily_oil_scaled = daily_oil_scaled[::-1]

# Create sequences and labels
X_oil, y_oil = create_sequences(daily_oil_scaled, window_size)

y_oil = pd.DataFrame(y_oil).set_index(daily_oil_scaled.index[window_size:len(daily_oil_scaled)])

# Split the data into training and testing sets
X_train_oil, X_test_oil, y_train_oil, y_test_oil = train_test_split(X_oil, y_oil, test_size=0.2, shuffle=False)

# Define the model
model_RNN_Oil = Sequential([
    SimpleRNN(128, activation = 'relu', return_sequences=True, input_shape=(window_size, len(features))),
    SimpleRNN(64, activation = 'relu', return_sequences=True),
    SimpleRNN(32, activation = 'relu', return_sequences=True),
    SimpleRNN(16,  activation = 'relu'),
    Dense(1)
])

# Compile the model
model_RNN_Oil.compile(optimizer='adam', loss='mse')

# Train the model
model_RNN_Oil.fit(X_train_oil, y_train_oil, epochs=50, batch_size=32, validation_split=0.2)

# Evaluate the model
loss_RNN_Oil_train = model_RNN_Oil.evaluate(X_train_oil, y_train_oil)
loss_RNN_Oil_test = model_RNN_Oil.evaluate(X_test_oil, y_test_oil)
print("Train Loss:", loss_RNN_Oil_train)
print("Test Loss:", loss_RNN_Oil_test)

#Predict
pred_oil_train_RNN = model_RNN_Oil.predict(X_train_oil)
pred_oil_train_RNN = pred_oil_train_RNN.reshape(len(pred_oil_train_RNN))

pred_oil_test_RNN = model_RNN_Oil.predict(X_test_oil)
pred_oil_test_RNN = pred_oil_test_RNN.reshape(len(pred_oil_test_RNN))
In [ ]:
# Mini model
model_RNN_Oil_mini = Sequential([
    SimpleRNN(3, activation = 'relu', return_sequences=True, input_shape=(window_size, len(features))),
    SimpleRNN(2, activation = 'relu'),
    Dense(1)
])

# Compile the model
model_RNN_Oil_mini.compile(optimizer='adam', loss='mse')

# Train the model
model_RNN_Oil_mini.fit(X_train_oil, y_train_oil, epochs=50, batch_size=32, validation_split=0.2)

loss_RNN_Oil_mini_train = model_RNN_Oil_mini.evaluate(X_train_oil, y_train_oil)
loss_RNN_Oil_mini_test = model_RNN_Oil_mini.evaluate(X_test_oil, y_test_oil)
print("Train Loss:", loss_RNN_Oil_mini_train)
print("Test Loss:", loss_RNN_Oil_mini_test)

#Predict
pred_oil_train_RNN_mini = model_RNN_Oil_mini.predict(X_train_oil)
pred_oil_train_RNN_mini = pred_oil_train_RNN_mini.reshape(len(pred_oil_train_RNN_mini))

pred_oil_test_RNN_mini = model_RNN_Oil.predict(X_test_oil)
pred_oil_test_RNN_mini = pred_oil_test_RNN.reshape(len(pred_oil_test_RNN_mini))
Epoch 1/50
63/63 [==============================] - 1s 8ms/step - loss: 0.5682 - val_loss: 0.2510
Epoch 2/50
63/63 [==============================] - 0s 6ms/step - loss: 0.3502 - val_loss: 0.1985
Epoch 3/50
63/63 [==============================] - 0s 5ms/step - loss: 0.2922 - val_loss: 0.1554
Epoch 4/50
63/63 [==============================] - 0s 5ms/step - loss: 0.2411 - val_loss: 0.1191
Epoch 5/50
63/63 [==============================] - 0s 5ms/step - loss: 0.1972 - val_loss: 0.0894
Epoch 6/50
63/63 [==============================] - 0s 5ms/step - loss: 0.1601 - val_loss: 0.0657
Epoch 7/50
63/63 [==============================] - 0s 6ms/step - loss: 0.1294 - val_loss: 0.0472
Epoch 8/50
63/63 [==============================] - 0s 5ms/step - loss: 0.1041 - val_loss: 0.0333
Epoch 9/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0838 - val_loss: 0.0231
Epoch 10/50
63/63 [==============================] - 0s 6ms/step - loss: 0.0676 - val_loss: 0.0161
Epoch 11/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0549 - val_loss: 0.0116
Epoch 12/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0452 - val_loss: 0.0090
Epoch 13/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0378 - val_loss: 0.0079
Epoch 14/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0323 - val_loss: 0.0078
Epoch 15/50
63/63 [==============================] - 0s 6ms/step - loss: 0.0283 - val_loss: 0.0083
Epoch 16/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0254 - val_loss: 0.0092
Epoch 17/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0233 - val_loss: 0.0099
Epoch 18/50
63/63 [==============================] - 0s 6ms/step - loss: 0.0215 - val_loss: 0.0074
Epoch 19/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0180 - val_loss: 0.0051
Epoch 20/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0157 - val_loss: 0.0053
Epoch 21/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0139 - val_loss: 0.0054
Epoch 22/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0124 - val_loss: 0.0053
Epoch 23/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0111 - val_loss: 0.0051
Epoch 24/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0099 - val_loss: 0.0046
Epoch 25/50
63/63 [==============================] - 0s 6ms/step - loss: 0.0087 - val_loss: 0.0040
Epoch 26/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0076 - val_loss: 0.0034
Epoch 27/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0065 - val_loss: 0.0028
Epoch 28/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0055 - val_loss: 0.0022
Epoch 29/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0046 - val_loss: 0.0018
Epoch 30/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0038 - val_loss: 0.0014
Epoch 31/50
63/63 [==============================] - 0s 6ms/step - loss: 0.0032 - val_loss: 0.0011
Epoch 32/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0026 - val_loss: 9.1457e-04
Epoch 33/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0021 - val_loss: 8.0404e-04
Epoch 34/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0018 - val_loss: 7.2816e-04
Epoch 35/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0015 - val_loss: 6.8880e-04
Epoch 36/50
63/63 [==============================] - 0s 6ms/step - loss: 0.0012 - val_loss: 6.7067e-04
Epoch 37/50
63/63 [==============================] - 0s 5ms/step - loss: 0.0010 - val_loss: 6.6040e-04
Epoch 38/50
63/63 [==============================] - 0s 5ms/step - loss: 8.7860e-04 - val_loss: 6.5626e-04
Epoch 39/50
63/63 [==============================] - 0s 5ms/step - loss: 7.5800e-04 - val_loss: 6.5735e-04
Epoch 40/50
63/63 [==============================] - 0s 5ms/step - loss: 6.5866e-04 - val_loss: 6.5803e-04
Epoch 41/50
63/63 [==============================] - 0s 6ms/step - loss: 5.7767e-04 - val_loss: 6.5490e-04
Epoch 42/50
63/63 [==============================] - 0s 5ms/step - loss: 5.1048e-04 - val_loss: 6.6335e-04
Epoch 43/50
63/63 [==============================] - 0s 6ms/step - loss: 4.5399e-04 - val_loss: 6.6570e-04
Epoch 44/50
63/63 [==============================] - 0s 5ms/step - loss: 4.0873e-04 - val_loss: 6.6665e-04
Epoch 45/50
63/63 [==============================] - 0s 5ms/step - loss: 3.6797e-04 - val_loss: 6.6889e-04
Epoch 46/50
63/63 [==============================] - 0s 5ms/step - loss: 3.3542e-04 - val_loss: 6.7698e-04
Epoch 47/50
63/63 [==============================] - 0s 5ms/step - loss: 3.0534e-04 - val_loss: 6.7433e-04
Epoch 48/50
63/63 [==============================] - 0s 5ms/step - loss: 2.8032e-04 - val_loss: 6.7622e-04
Epoch 49/50
63/63 [==============================] - 0s 5ms/step - loss: 2.5972e-04 - val_loss: 6.8695e-04
Epoch 50/50
63/63 [==============================] - 0s 6ms/step - loss: 2.4313e-04 - val_loss: 6.8525e-04
79/79 [==============================] - 0s 2ms/step - loss: 3.2238e-04
20/20 [==============================] - 0s 1ms/step - loss: 9.6223e-04
Train Loss: 0.00032238158746622503
Test Loss: 0.000962233985774219
79/79 [==============================] - 0s 2ms/step
20/20 [==============================] - 0s 2ms/step
In [ ]:
y_train_oil_true = pd.DataFrame(inverse_minmax_scaling(y_train_oil, min(daily_oil['Price']), max(daily_oil['Price'])))
y_test_oil_true = pd.DataFrame(inverse_minmax_scaling(y_test_oil, min(daily_oil['Price']), max(daily_oil['Price'])))
pred_oil_test_RNN = pd.DataFrame(inverse_minmax_scaling(pred_oil_test_RNN, min(daily_oil['Price']), max(daily_oil['Price'])))
pred_oil_train_RNN = pd.DataFrame(inverse_minmax_scaling(pred_oil_train_RNN, min(daily_oil['Price']), max(daily_oil['Price'])))

pred_oil_train_RNN.set_index(y_train_oil_true.index, inplace = True)
pred_oil_test_RNN.set_index(y_test_oil_true.index, inplace = True)
In [ ]:
pred_oil_test_RNN_mini = pd.DataFrame(inverse_minmax_scaling(pred_oil_test_RNN_mini, min(daily_oil['Price']), max(daily_oil['Price'])))
pred_oil_train_RNN_mini = pd.DataFrame(inverse_minmax_scaling(pred_oil_train_RNN_mini, min(daily_oil['Price']), max(daily_oil['Price'])))

pred_oil_train_RNN_mini.set_index(y_train_oil_true.index, inplace = True)
pred_oil_test_RNN_mini.set_index(y_test_oil_true.index, inplace = True)
In [ ]:
# Plotting the actual vs. predicted oil prices for training
plt.figure(figsize=(10, 6))
plt.plot(y_train_oil_true, label='Actual Price', color='blue')
plt.plot(pred_oil_train_RNN, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Oil Prices (Training Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_train_oil_true, pred_oil_train_RNN))
No description has been provided for this image
Root Mean Squared Error (RMSE): 3.8902457238463874
In [ ]:
# Plotting the actual vs. predicted oil prices for testing
plt.figure(figsize=(10, 6))
plt.plot(y_test_oil_true, label='Actual Price', color='blue')
plt.plot(pred_oil_test_RNN, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Oil Prices (Testing Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_test_oil_true, pred_oil_test_RNN))
No description has been provided for this image
Root Mean Squared Error (RMSE): 8.38547355923431

Using RNN model to predict stock price¶

Gold related stocks¶

In [ ]:
# Creating the dataframe and X,y values for Barrick
daily_barrick_scaled = pd.DataFrame()
daily_barrick_scaled[['Open', 'High', 'Low', 'Change %','Price']] = scaler.fit_transform(daily_barrick[['Open', 'High', 'Low', 'Change %','Price']])
daily_barrick_time = daily_barrick.index
daily_barrick_scaled.set_index(daily_barrick_time,inplace = True)
daily_barrick_scaled = daily_barrick_scaled[::-1]

X_barrick, y_barrick = create_sequences(daily_barrick_scaled, window_size)
y_barrick = pd.DataFrame(y_barrick).set_index(daily_barrick_scaled.index[window_size:len(daily_barrick_scaled)])

# Create the predictions
pred_barrick_RNN = model_RNN_Gold.predict(X_barrick)
pred_barrick_RNN = pred_barrick_RNN.reshape(len(pred_barrick_RNN))

y_barrick_true = pd.DataFrame(inverse_minmax_scaling(y_barrick, min(daily_barrick['Price']), max(daily_barrick['Price'])))
pred_barrick_RNN_true = pd.DataFrame(inverse_minmax_scaling(pred_barrick_RNN, min(daily_barrick['Price']), max(daily_barrick['Price'])))

pred_barrick_RNN_true.set_index(y_barrick_true.index, inplace = True)
In [ ]:
# Plotting the actual vs. predicted barrick stock prices
plt.figure(figsize=(10, 6))
plt.plot(y_barrick_true, label='Actual Price', color='blue')
plt.plot(pred_barrick_RNN_true, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Barricks Stock Prices (Training Data)')
plt.legend()
plt.show()

print("Mean Squared Error of True Data (MSE):", mean_squared_error(y_barrick_true, pred_barrick_RNN_true))
print("Mean Squared Error of Scaled Data (MSE):", mean_squared_error(y_barrick, pred_barrick_RNN))
No description has been provided for this image
Mean Squared Error of True Data (MSE): 0.29008724497797883
Mean Squared Error of Scaled Data (MSE): 0.0005023671662930076
In [ ]:
# Creating the dataframe and X,y values for Newmont
daily_newmont_scaled = pd.DataFrame()
daily_newmont_scaled[['Open', 'High', 'Low', 'Change %','Price']] = scaler.fit_transform(daily_newmont[['Open', 'High', 'Low', 'Change %','Price']])
daily_newmont_time = daily_newmont.index
daily_newmont_scaled.set_index(daily_newmont_time,inplace = True)
daily_newmont_scaled = daily_newmont_scaled[::-1]

X_newmont, y_newmont = create_sequences(daily_newmont_scaled, window_size)
y_newmont = pd.DataFrame(y_newmont).set_index(daily_newmont_scaled.index[window_size:len(daily_newmont_scaled)])

# Create the predictions
pred_newmont_RNN = model_RNN_Gold.predict(X_newmont)
pred_newmont_RNN = pred_newmont_RNN.reshape(len(pred_newmont_RNN))

y_newmont_true = pd.DataFrame(inverse_minmax_scaling(y_newmont, min(daily_newmont['Price']), max(daily_newmont['Price'])))
pred_newmont_RNN_true = pd.DataFrame(inverse_minmax_scaling(pred_newmont_RNN, min(daily_newmont['Price']), max(daily_newmont['Price'])))

pred_newmont_RNN_true.set_index(y_newmont_true.index, inplace = True)
In [ ]:
# Plotting the actual vs. predicted newmont stock prices
plt.figure(figsize=(10, 6))
plt.plot(y_newmont_true, label='Actual Price', color='blue')
plt.plot(pred_newmont_RNN_true, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Newmonts Stock Prices (Training Data)')
plt.legend()
plt.show()

print("Mean Squared Error of True Data (MSE):", mean_squared_error(y_newmont_true, pred_newmont_RNN_true))
print("Mean Squared Error of Scaled Data (MSE):", mean_squared_error(y_newmont, pred_newmont_RNN))
No description has been provided for this image
Mean Squared Error of True Data (MSE): 1.4396684817060446
Mean Squared Error of Scaled Data (MSE): 0.000291722444852658

Oil related stocks¶

In [ ]:
# Creating the dataframe and X,y values for Chevron
daily_chevron_scaled = pd.DataFrame()
daily_chevron_scaled[['Open', 'High', 'Low', 'Change %','Price']] = scaler.fit_transform(daily_chevron[['Open', 'High', 'Low', 'Change %','Price']])
daily_chevron_time = daily_chevron.index
daily_chevron_scaled.set_index(daily_chevron_time,inplace = True)
daily_chevron_scaled = daily_chevron_scaled[::-1]

X_chevron, y_chevron = create_sequences(daily_chevron_scaled, window_size)
y_chevron = pd.DataFrame(y_chevron).set_index(daily_chevron_scaled.index[window_size:len(daily_chevron_scaled)])

# Create the predictions
pred_chevron_RNN = model_RNN_Oil.predict(X_chevron)
pred_chevron_RNN = pred_chevron_RNN.reshape(len(pred_chevron_RNN))

y_chevron_true = pd.DataFrame(inverse_minmax_scaling(y_chevron, min(daily_chevron['Price']), max(daily_chevron['Price'])))
pred_chevron_RNN_true = pd.DataFrame(inverse_minmax_scaling(pred_chevron_RNN, min(daily_chevron['Price']), max(daily_chevron['Price'])))

pred_chevron_RNN_true.set_index(y_chevron_true.index, inplace = True)
In [ ]:
# Plotting the actual vs. predicted chevron stock prices
plt.figure(figsize=(10, 6))
plt.plot(y_chevron_true, label='Actual Price', color='blue')
plt.plot(pred_chevron_RNN_true, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Chevrons Stock Prices (Training Data)')
plt.legend()
plt.show()

print("Mean Squared Error of True Data (MSE):", mean_squared_error(y_chevron_true, pred_chevron_RNN_true))
print("Mean Squared Error of Scaled Data (MSE):", mean_squared_error(y_chevron, pred_chevron_RNN))
No description has been provided for this image
Mean Squared Error of True Data (MSE): 49.541037169636404
Mean Squared Error of Scaled Data (MSE): 0.0027660369486865255
In [ ]:
# Creating the dataframe and X,y values for Exxon
daily_exxon_scaled = pd.DataFrame()
daily_exxon_scaled[['Open', 'High', 'Low', 'Change %','Price']] = scaler.fit_transform(daily_exxon[['Open', 'High', 'Low', 'Change %','Price']])
daily_exxon_time = daily_exxon.index
daily_exxon_scaled.set_index(daily_exxon_time,inplace = True)
daily_exxon_scaled = daily_exxon_scaled[::-1]

X_exxon, y_exxon = create_sequences(daily_exxon_scaled, window_size)
y_exxon = pd.DataFrame(y_exxon).set_index(daily_exxon_scaled.index[window_size:len(daily_exxon_scaled)])

# Create the predictions
pred_exxon_RNN = model_RNN_Oil.predict(X_exxon)
pred_exxon_RNN = pred_exxon_RNN.reshape(len(pred_exxon_RNN))

y_exxon_true = pd.DataFrame(inverse_minmax_scaling(y_exxon, min(daily_exxon['Price']), max(daily_exxon['Price'])))
pred_exxon_RNN_true = pd.DataFrame(inverse_minmax_scaling(pred_exxon_RNN, min(daily_exxon['Price']), max(daily_exxon['Price'])))

pred_exxon_RNN_true.set_index(y_exxon_true.index, inplace = True)
In [ ]:
# Plotting the actual vs. predicted exxon stock prices
plt.figure(figsize=(10, 6))
plt.plot(y_exxon_true, label='Actual Price', color='blue')
plt.plot(pred_exxon_RNN_true, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Exxons Stock Prices (Training Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error of True Data (RMSE):", mean_squared_error(y_exxon_true, pred_exxon_RNN_true))
print("Root Mean Squared Error of Scaled Data (RMSE):", mean_squared_error(y_exxon, pred_exxon_RNN))
No description has been provided for this image
Root Mean Squared Error of True Data (RMSE): 22.827442548253515
Root Mean Squared Error of Scaled Data (RMSE): 0.0028981474702628397

GRU¶

Gold¶

In [ ]:
#Gold

# Define the model
model_GRU_Gold = Sequential([
    GRU(128, activation = 'relu', return_sequences=True, input_shape=(window_size, len(features))),
    GRU(64, activation = 'relu', return_sequences=True),
    GRU(32, activation = 'relu', return_sequences=True),
    GRU(16, activation = 'relu'),
    Dense(1)
])

# Compile the model
model_GRU_Gold.compile(optimizer='adam', loss='mse')

# Train the model
model_GRU_Gold.fit(X_train_gold, y_train_gold, epochs=50, batch_size=32, validation_split=0.2)

# Evaluate the model
loss_GRU_Gold_train = model_GRU_Gold.evaluate(X_train_gold, y_train_gold)
loss_GRU_Gold_test = model_GRU_Gold.evaluate(X_test_gold, y_test_gold)
print("Train Loss:", loss_GRU_Gold_train)
print("Test Loss:", loss_GRU_Gold_test)

#Predict
pred_gold_train_GRU = model_GRU_Gold.predict(X_train_gold)
pred_gold_train_GRU = pred_gold_train_GRU.reshape(len(pred_gold_train_GRU))

pred_gold_test_GRU = model_GRU_Gold.predict(X_test_gold)
pred_gold_test_GRU = pred_gold_test_GRU.reshape(len(pred_gold_test_GRU))
Epoch 1/50
52/52 [==============================] - 4s 22ms/step - loss: 0.0121 - val_loss: 0.0108
Epoch 2/50
52/52 [==============================] - 1s 14ms/step - loss: 5.7019e-04 - val_loss: 0.0014
Epoch 3/50
52/52 [==============================] - 1s 16ms/step - loss: 4.3929e-04 - val_loss: 9.2561e-04
Epoch 4/50
52/52 [==============================] - 1s 17ms/step - loss: 4.3184e-04 - val_loss: 8.5992e-04
Epoch 5/50
52/52 [==============================] - 1s 14ms/step - loss: 3.7305e-04 - val_loss: 8.7621e-04
Epoch 6/50
52/52 [==============================] - 1s 14ms/step - loss: 3.3299e-04 - val_loss: 0.0017
Epoch 7/50
52/52 [==============================] - 1s 16ms/step - loss: 3.3827e-04 - val_loss: 7.5532e-04
Epoch 8/50
52/52 [==============================] - 1s 15ms/step - loss: 3.0106e-04 - val_loss: 6.7105e-04
Epoch 9/50
52/52 [==============================] - 1s 14ms/step - loss: 2.7953e-04 - val_loss: 0.0028
Epoch 10/50
52/52 [==============================] - 1s 14ms/step - loss: 2.6124e-04 - val_loss: 6.4856e-04
Epoch 11/50
52/52 [==============================] - 1s 15ms/step - loss: 2.8181e-04 - val_loss: 5.8847e-04
Epoch 12/50
52/52 [==============================] - 1s 14ms/step - loss: 2.6051e-04 - val_loss: 0.0018
Epoch 13/50
52/52 [==============================] - 1s 14ms/step - loss: 2.6074e-04 - val_loss: 7.4352e-04
Epoch 14/50
52/52 [==============================] - 1s 15ms/step - loss: 2.2178e-04 - val_loss: 5.2620e-04
Epoch 15/50
52/52 [==============================] - 1s 15ms/step - loss: 2.1168e-04 - val_loss: 0.0012
Epoch 16/50
52/52 [==============================] - 1s 15ms/step - loss: 1.9552e-04 - val_loss: 4.4322e-04
Epoch 17/50
52/52 [==============================] - 1s 14ms/step - loss: 2.0164e-04 - val_loss: 4.7700e-04
Epoch 18/50
52/52 [==============================] - 1s 15ms/step - loss: 1.8180e-04 - val_loss: 4.5588e-04
Epoch 19/50
52/52 [==============================] - 1s 14ms/step - loss: 1.6521e-04 - val_loss: 3.8108e-04
Epoch 20/50
52/52 [==============================] - 1s 15ms/step - loss: 1.5870e-04 - val_loss: 3.6681e-04
Epoch 21/50
52/52 [==============================] - 1s 17ms/step - loss: 1.6486e-04 - val_loss: 4.6971e-04
Epoch 22/50
52/52 [==============================] - 1s 14ms/step - loss: 1.6114e-04 - val_loss: 4.7418e-04
Epoch 23/50
52/52 [==============================] - 1s 14ms/step - loss: 1.5117e-04 - val_loss: 0.0023
Epoch 24/50
52/52 [==============================] - 1s 15ms/step - loss: 1.5245e-04 - val_loss: 6.1125e-04
Epoch 25/50
52/52 [==============================] - 1s 15ms/step - loss: 1.6281e-04 - val_loss: 4.4902e-04
Epoch 26/50
52/52 [==============================] - 1s 15ms/step - loss: 1.5153e-04 - val_loss: 4.9293e-04
Epoch 27/50
52/52 [==============================] - 1s 14ms/step - loss: 1.6309e-04 - val_loss: 3.1367e-04
Epoch 28/50
52/52 [==============================] - 1s 15ms/step - loss: 1.3451e-04 - val_loss: 3.7317e-04
Epoch 29/50
52/52 [==============================] - 1s 15ms/step - loss: 1.4637e-04 - val_loss: 4.0086e-04
Epoch 30/50
52/52 [==============================] - 1s 14ms/step - loss: 1.6495e-04 - val_loss: 3.4766e-04
Epoch 31/50
52/52 [==============================] - 1s 15ms/step - loss: 1.4193e-04 - val_loss: 0.0011
Epoch 32/50
52/52 [==============================] - 1s 14ms/step - loss: 1.5152e-04 - val_loss: 5.1045e-04
Epoch 33/50
52/52 [==============================] - 1s 15ms/step - loss: 1.6403e-04 - val_loss: 5.5809e-04
Epoch 34/50
52/52 [==============================] - 1s 14ms/step - loss: 1.5585e-04 - val_loss: 7.6255e-04
Epoch 35/50
52/52 [==============================] - 1s 15ms/step - loss: 1.3740e-04 - val_loss: 3.7841e-04
Epoch 36/50
52/52 [==============================] - 1s 16ms/step - loss: 1.3594e-04 - val_loss: 3.7049e-04
Epoch 37/50
52/52 [==============================] - 1s 14ms/step - loss: 1.2717e-04 - val_loss: 3.2862e-04
Epoch 38/50
52/52 [==============================] - 1s 14ms/step - loss: 1.3528e-04 - val_loss: 5.2115e-04
Epoch 39/50
52/52 [==============================] - 1s 15ms/step - loss: 1.4304e-04 - val_loss: 4.7663e-04
Epoch 40/50
52/52 [==============================] - 1s 14ms/step - loss: 1.5825e-04 - val_loss: 6.4982e-04
Epoch 41/50
52/52 [==============================] - 1s 14ms/step - loss: 1.5487e-04 - val_loss: 3.2640e-04
Epoch 42/50
52/52 [==============================] - 1s 15ms/step - loss: 1.4347e-04 - val_loss: 8.5829e-04
Epoch 43/50
52/52 [==============================] - 1s 14ms/step - loss: 1.4395e-04 - val_loss: 3.5205e-04
Epoch 44/50
52/52 [==============================] - 1s 14ms/step - loss: 1.4217e-04 - val_loss: 0.0014
Epoch 45/50
52/52 [==============================] - 1s 15ms/step - loss: 1.4853e-04 - val_loss: 3.3259e-04
Epoch 46/50
52/52 [==============================] - 1s 14ms/step - loss: 1.2728e-04 - val_loss: 3.6579e-04
Epoch 47/50
52/52 [==============================] - 1s 16ms/step - loss: 1.5584e-04 - val_loss: 4.4200e-04
Epoch 48/50
52/52 [==============================] - 1s 14ms/step - loss: 1.4581e-04 - val_loss: 5.8653e-04
Epoch 49/50
52/52 [==============================] - 1s 14ms/step - loss: 1.4888e-04 - val_loss: 0.0012
Epoch 50/50
52/52 [==============================] - 1s 15ms/step - loss: 1.5914e-04 - val_loss: 3.2938e-04
65/65 [==============================] - 0s 4ms/step - loss: 1.6892e-04
17/17 [==============================] - 0s 4ms/step - loss: 3.1023e-04
Train Loss: 0.00016892427811399102
Test Loss: 0.00031022829352878034
65/65 [==============================] - 1s 4ms/step
17/17 [==============================] - 0s 5ms/step
In [ ]:
# Mini model
model_GRU_Gold_mini = Sequential([
    GRU(3, activation = 'relu', return_sequences=True, input_shape=(window_size, len(features))),
    GRU(2, activation = 'relu'),
    Dense(1)
])

# Compile the model
model_GRU_Gold_mini.compile(optimizer='adam', loss='mse')

# Train the model
model_GRU_Gold_mini.fit(X_train_gold, y_train_gold, epochs=50, batch_size=32, validation_split=0.2)

loss_GRU_Gold_mini_train = model_GRU_Gold_mini.evaluate(X_train_gold, y_train_gold)
loss_GRU_Gold_mini_test = model_GRU_Gold_mini.evaluate(X_test_gold, y_test_gold)
print("Train Loss:", loss_GRU_Gold_mini_train)
print("Test Loss:", loss_GRU_Gold_mini_test)

#Predict
pred_gold_train_GRU_mini = model_GRU_Gold_mini.predict(X_train_gold)
pred_gold_train_GRU_mini = pred_gold_train_GRU_mini.reshape(len(pred_gold_train_GRU_mini))

pred_gold_test_GRU_mini = model_GRU_Gold.predict(X_test_gold)
pred_gold_test_GRU_mini = pred_gold_test_GRU.reshape(len(pred_gold_test_GRU_mini))
Epoch 1/50
52/52 [==============================] - 2s 10ms/step - loss: 0.0243 - val_loss: 0.0065
Epoch 2/50
52/52 [==============================] - 0s 6ms/step - loss: 0.0025 - val_loss: 0.0043
Epoch 3/50
52/52 [==============================] - 0s 6ms/step - loss: 0.0016 - val_loss: 0.0040
Epoch 4/50
52/52 [==============================] - 0s 5ms/step - loss: 0.0013 - val_loss: 0.0039
Epoch 5/50
52/52 [==============================] - 0s 6ms/step - loss: 0.0012 - val_loss: 0.0031
Epoch 6/50
52/52 [==============================] - 0s 5ms/step - loss: 0.0011 - val_loss: 0.0023
Epoch 7/50
52/52 [==============================] - 0s 5ms/step - loss: 0.0010 - val_loss: 0.0023
Epoch 8/50
52/52 [==============================] - 0s 5ms/step - loss: 9.6780e-04 - val_loss: 0.0021
Epoch 9/50
52/52 [==============================] - 0s 5ms/step - loss: 9.2250e-04 - val_loss: 0.0028
Epoch 10/50
52/52 [==============================] - 0s 6ms/step - loss: 8.7134e-04 - val_loss: 0.0020
Epoch 11/50
52/52 [==============================] - 0s 6ms/step - loss: 8.3295e-04 - val_loss: 0.0018
Epoch 12/50
52/52 [==============================] - 0s 5ms/step - loss: 7.9795e-04 - val_loss: 0.0020
Epoch 13/50
52/52 [==============================] - 0s 6ms/step - loss: 7.6709e-04 - val_loss: 0.0021
Epoch 14/50
52/52 [==============================] - 0s 6ms/step - loss: 7.3022e-04 - val_loss: 0.0016
Epoch 15/50
52/52 [==============================] - 0s 6ms/step - loss: 7.0813e-04 - val_loss: 0.0021
Epoch 16/50
52/52 [==============================] - 0s 6ms/step - loss: 6.7616e-04 - val_loss: 0.0018
Epoch 17/50
52/52 [==============================] - 0s 6ms/step - loss: 6.5563e-04 - val_loss: 0.0015
Epoch 18/50
52/52 [==============================] - 0s 6ms/step - loss: 6.2556e-04 - val_loss: 0.0019
Epoch 19/50
52/52 [==============================] - 0s 6ms/step - loss: 6.0056e-04 - val_loss: 0.0015
Epoch 20/50
52/52 [==============================] - 0s 6ms/step - loss: 5.7707e-04 - val_loss: 0.0020
Epoch 21/50
52/52 [==============================] - 0s 6ms/step - loss: 5.5677e-04 - val_loss: 0.0014
Epoch 22/50
52/52 [==============================] - 0s 6ms/step - loss: 5.3962e-04 - val_loss: 0.0019
Epoch 23/50
52/52 [==============================] - 0s 6ms/step - loss: 5.1811e-04 - val_loss: 0.0012
Epoch 24/50
52/52 [==============================] - 0s 6ms/step - loss: 5.0269e-04 - val_loss: 0.0014
Epoch 25/50
52/52 [==============================] - 0s 6ms/step - loss: 4.7857e-04 - val_loss: 0.0012
Epoch 26/50
52/52 [==============================] - 0s 6ms/step - loss: 4.6339e-04 - val_loss: 0.0013
Epoch 27/50
52/52 [==============================] - 0s 6ms/step - loss: 4.4549e-04 - val_loss: 0.0010
Epoch 28/50
52/52 [==============================] - 0s 6ms/step - loss: 4.2785e-04 - val_loss: 0.0012
Epoch 29/50
52/52 [==============================] - 0s 5ms/step - loss: 4.2250e-04 - val_loss: 0.0015
Epoch 30/50
52/52 [==============================] - 0s 6ms/step - loss: 4.0195e-04 - val_loss: 9.2061e-04
Epoch 31/50
52/52 [==============================] - 0s 6ms/step - loss: 3.8685e-04 - val_loss: 8.9673e-04
Epoch 32/50
52/52 [==============================] - 0s 5ms/step - loss: 3.7415e-04 - val_loss: 8.8604e-04
Epoch 33/50
52/52 [==============================] - 0s 5ms/step - loss: 3.6972e-04 - val_loss: 8.2028e-04
Epoch 34/50
52/52 [==============================] - 0s 5ms/step - loss: 3.4905e-04 - val_loss: 9.5116e-04
Epoch 35/50
52/52 [==============================] - 0s 5ms/step - loss: 3.4252e-04 - val_loss: 9.0119e-04
Epoch 36/50
52/52 [==============================] - 0s 6ms/step - loss: 3.3038e-04 - val_loss: 9.1026e-04
Epoch 37/50
52/52 [==============================] - 0s 6ms/step - loss: 3.1299e-04 - val_loss: 0.0010
Epoch 38/50
52/52 [==============================] - 0s 5ms/step - loss: 2.9831e-04 - val_loss: 6.4400e-04
Epoch 39/50
52/52 [==============================] - 0s 6ms/step - loss: 2.8455e-04 - val_loss: 8.3847e-04
Epoch 40/50
52/52 [==============================] - 0s 6ms/step - loss: 2.7315e-04 - val_loss: 6.8281e-04
Epoch 41/50
52/52 [==============================] - 0s 6ms/step - loss: 2.6209e-04 - val_loss: 6.0839e-04
Epoch 42/50
52/52 [==============================] - 0s 6ms/step - loss: 2.5318e-04 - val_loss: 6.3090e-04
Epoch 43/50
52/52 [==============================] - 0s 6ms/step - loss: 2.4613e-04 - val_loss: 5.8884e-04
Epoch 44/50
52/52 [==============================] - 0s 6ms/step - loss: 2.3533e-04 - val_loss: 5.3720e-04
Epoch 45/50
52/52 [==============================] - 0s 6ms/step - loss: 2.2778e-04 - val_loss: 5.4439e-04
Epoch 46/50
52/52 [==============================] - 0s 6ms/step - loss: 2.2428e-04 - val_loss: 5.4194e-04
Epoch 47/50
52/52 [==============================] - 0s 6ms/step - loss: 2.1619e-04 - val_loss: 6.4400e-04
Epoch 48/50
52/52 [==============================] - 0s 6ms/step - loss: 2.0680e-04 - val_loss: 4.4063e-04
Epoch 49/50
52/52 [==============================] - 0s 5ms/step - loss: 1.9730e-04 - val_loss: 4.3364e-04
Epoch 50/50
52/52 [==============================] - 0s 6ms/step - loss: 1.8923e-04 - val_loss: 4.8388e-04
65/65 [==============================] - 0s 2ms/step - loss: 2.4526e-04
17/17 [==============================] - 0s 2ms/step - loss: 4.3236e-04
Train Loss: 0.0002452561748214066
Test Loss: 0.00043236088822595775
65/65 [==============================] - 0s 2ms/step
17/17 [==============================] - 0s 4ms/step
In [ ]:
pred_gold_test_GRU = pd.DataFrame(inverse_minmax_scaling(pred_gold_test_GRU, min(daily_gold['Price']), max(daily_gold['Price'])))
pred_gold_train_GRU = pd.DataFrame(inverse_minmax_scaling(pred_gold_train_GRU, min(daily_gold['Price']), max(daily_gold['Price'])))

pred_gold_train_GRU.set_index(y_train_gold_true.index, inplace = True)
pred_gold_test_GRU.set_index(y_test_gold_true.index, inplace = True)
In [ ]:
pred_gold_test_GRU_mini = pd.DataFrame(inverse_minmax_scaling(pred_gold_test_GRU_mini, min(daily_gold['Price']), max(daily_gold['Price'])))
pred_gold_train_GRU_mini = pd.DataFrame(inverse_minmax_scaling(pred_gold_train_GRU_mini, min(daily_gold['Price']), max(daily_gold['Price'])))

pred_gold_train_GRU_mini.set_index(y_train_gold_true.index, inplace = True)
pred_gold_test_GRU_mini.set_index(y_test_gold_true.index, inplace = True)
In [ ]:
# Plotting the actual vs. predicted gold prices for training
plt.figure(figsize=(10, 6))
plt.plot(y_train_gold_true, label='Actual Price', color='blue')
plt.plot(pred_gold_train_GRU, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Gold Prices (Training Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_train_gold_true, pred_gold_train_GRU))
No description has been provided for this image
Root Mean Squared Error (RMSE): 177.56613727807562
In [ ]:
# Plotting the actual vs. predicted gold prices for testing
plt.figure(figsize=(10, 6))
plt.plot(y_test_gold_true, label='Actual Price', color='blue')
plt.plot(pred_gold_test_GRU, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Gold Prices (Testing Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_test_gold_true, pred_gold_test_GRU))
No description has been provided for this image
Root Mean Squared Error (RMSE): 326.0989669317497

Oil¶

In [ ]:
#Oil

# Define the model
model_GRU_Oil = Sequential([
    GRU(128, activation = 'relu', return_sequences=True, input_shape=(window_size, len(features))),
    GRU(64, activation = 'relu', return_sequences=True),
    GRU(32, activation = 'relu', return_sequences=True),
    GRU(16,  activation = 'relu'),
    Dense(1)
])

# Compile the model
model_GRU_Oil.compile(optimizer='adam', loss='mse')

# Train the model
model_GRU_Oil.fit(X_train_oil, y_train_oil, epochs=50, batch_size=32, validation_split=0.2)

# Evaluate the model
loss_GRU_Oil_train = model_GRU_Oil.evaluate(X_train_oil, y_train_oil)
loss_GRU_Oil_test = model_GRU_Oil.evaluate(X_test_oil, y_test_oil)
print("Train Loss:", loss_GRU_Oil_train)
print("Test Loss:", loss_GRU_Oil_test)

#Predict
pred_oil_train_GRU = model_GRU_Oil.predict(X_train_oil)
pred_oil_train_GRU = pred_oil_train_GRU.reshape(len(pred_oil_train_GRU))

pred_oil_test_GRU = model_GRU_Oil.predict(X_test_oil)
pred_oil_test_GRU = pred_oil_test_GRU.reshape(len(pred_oil_test_GRU))
Epoch 1/50
63/63 [==============================] - 5s 28ms/step - loss: 0.0614 - val_loss: 0.0033
Epoch 2/50
63/63 [==============================] - 1s 20ms/step - loss: 0.0011 - val_loss: 8.3659e-04
Epoch 3/50
63/63 [==============================] - 1s 21ms/step - loss: 1.8713e-04 - val_loss: 6.3514e-04
Epoch 4/50
63/63 [==============================] - 1s 20ms/step - loss: 1.7290e-04 - val_loss: 5.9665e-04
Epoch 5/50
63/63 [==============================] - 1s 21ms/step - loss: 1.7942e-04 - val_loss: 5.9023e-04
Epoch 6/50
63/63 [==============================] - 1s 21ms/step - loss: 1.7357e-04 - val_loss: 5.7566e-04
Epoch 7/50
63/63 [==============================] - 1s 21ms/step - loss: 1.6802e-04 - val_loss: 5.4835e-04
Epoch 8/50
63/63 [==============================] - 1s 20ms/step - loss: 1.6906e-04 - val_loss: 5.5063e-04
Epoch 9/50
63/63 [==============================] - 1s 21ms/step - loss: 1.7363e-04 - val_loss: 5.7414e-04
Epoch 10/50
63/63 [==============================] - 1s 21ms/step - loss: 1.7175e-04 - val_loss: 5.4694e-04
Epoch 11/50
63/63 [==============================] - 1s 20ms/step - loss: 1.7003e-04 - val_loss: 5.5423e-04
Epoch 12/50
63/63 [==============================] - 1s 20ms/step - loss: 1.7602e-04 - val_loss: 5.5176e-04
Epoch 13/50
63/63 [==============================] - 1s 21ms/step - loss: 1.6991e-04 - val_loss: 5.6889e-04
Epoch 14/50
63/63 [==============================] - 1s 20ms/step - loss: 1.7418e-04 - val_loss: 5.4792e-04
Epoch 15/50
63/63 [==============================] - 1s 20ms/step - loss: 1.6944e-04 - val_loss: 5.6352e-04
Epoch 16/50
63/63 [==============================] - 1s 22ms/step - loss: 1.8408e-04 - val_loss: 5.4109e-04
Epoch 17/50
63/63 [==============================] - 1s 20ms/step - loss: 1.6641e-04 - val_loss: 5.6678e-04
Epoch 18/50
63/63 [==============================] - 1s 20ms/step - loss: 1.7319e-04 - val_loss: 5.5028e-04
Epoch 19/50
63/63 [==============================] - 1s 20ms/step - loss: 1.7567e-04 - val_loss: 5.4490e-04
Epoch 20/50
63/63 [==============================] - 1s 21ms/step - loss: 1.6721e-04 - val_loss: 5.5876e-04
Epoch 21/50
63/63 [==============================] - 1s 21ms/step - loss: 1.7625e-04 - val_loss: 5.7697e-04
Epoch 22/50
63/63 [==============================] - 1s 20ms/step - loss: 1.7340e-04 - val_loss: 5.5881e-04
Epoch 23/50
63/63 [==============================] - 1s 20ms/step - loss: 1.6688e-04 - val_loss: 5.4768e-04
Epoch 24/50
63/63 [==============================] - 1s 21ms/step - loss: 1.6425e-04 - val_loss: 5.3167e-04
Epoch 25/50
63/63 [==============================] - 1s 20ms/step - loss: 1.8584e-04 - val_loss: 5.2604e-04
Epoch 26/50
63/63 [==============================] - 1s 20ms/step - loss: 1.5685e-04 - val_loss: 5.3285e-04
Epoch 27/50
63/63 [==============================] - 1s 20ms/step - loss: 1.8377e-04 - val_loss: 5.9189e-04
Epoch 28/50
63/63 [==============================] - 1s 19ms/step - loss: 1.8360e-04 - val_loss: 5.3649e-04
Epoch 29/50
63/63 [==============================] - 1s 20ms/step - loss: 1.5631e-04 - val_loss: 5.2246e-04
Epoch 30/50
63/63 [==============================] - 1s 20ms/step - loss: 2.1042e-04 - val_loss: 5.2371e-04
Epoch 31/50
63/63 [==============================] - 1s 19ms/step - loss: 1.7577e-04 - val_loss: 5.5681e-04
Epoch 32/50
63/63 [==============================] - 2s 24ms/step - loss: 1.5731e-04 - val_loss: 5.6163e-04
Epoch 33/50
63/63 [==============================] - 1s 19ms/step - loss: 1.7730e-04 - val_loss: 5.1030e-04
Epoch 34/50
63/63 [==============================] - 1s 19ms/step - loss: 1.6183e-04 - val_loss: 5.4104e-04
Epoch 35/50
63/63 [==============================] - 1s 21ms/step - loss: 1.6560e-04 - val_loss: 5.1126e-04
Epoch 36/50
63/63 [==============================] - 1s 21ms/step - loss: 1.4726e-04 - val_loss: 5.3716e-04
Epoch 37/50
63/63 [==============================] - 1s 20ms/step - loss: 1.5360e-04 - val_loss: 5.1115e-04
Epoch 38/50
63/63 [==============================] - 1s 20ms/step - loss: 2.1446e-04 - val_loss: 5.3046e-04
Epoch 39/50
63/63 [==============================] - 1s 20ms/step - loss: 1.8628e-04 - val_loss: 5.4867e-04
Epoch 40/50
63/63 [==============================] - 1s 19ms/step - loss: 1.5356e-04 - val_loss: 4.9906e-04
Epoch 41/50
63/63 [==============================] - 1s 20ms/step - loss: 2.0153e-04 - val_loss: 5.3892e-04
Epoch 42/50
63/63 [==============================] - 1s 19ms/step - loss: 2.0606e-04 - val_loss: 5.5893e-04
Epoch 43/50
63/63 [==============================] - 1s 19ms/step - loss: 1.4928e-04 - val_loss: 5.0642e-04
Epoch 44/50
63/63 [==============================] - 1s 21ms/step - loss: 1.5247e-04 - val_loss: 5.0436e-04
Epoch 45/50
63/63 [==============================] - 1s 19ms/step - loss: 1.5946e-04 - val_loss: 4.8952e-04
Epoch 46/50
63/63 [==============================] - 1s 20ms/step - loss: 1.6219e-04 - val_loss: 4.9704e-04
Epoch 47/50
63/63 [==============================] - 1s 19ms/step - loss: 1.5647e-04 - val_loss: 4.8868e-04
Epoch 48/50
63/63 [==============================] - 1s 19ms/step - loss: 1.4439e-04 - val_loss: 4.9553e-04
Epoch 49/50
63/63 [==============================] - 1s 20ms/step - loss: 1.6142e-04 - val_loss: 5.3097e-04
Epoch 50/50
63/63 [==============================] - 1s 20ms/step - loss: 1.5965e-04 - val_loss: 4.7618e-04
79/79 [==============================] - 0s 4ms/step - loss: 1.9575e-04
20/20 [==============================] - 0s 3ms/step - loss: 4.6915e-04
Train Loss: 0.0001957538624992594
Test Loss: 0.0004691465001087636
79/79 [==============================] - 1s 4ms/step
20/20 [==============================] - 0s 4ms/step
In [ ]:
# Mini model
model_GRU_Oil_mini = Sequential([
    GRU(3, activation = 'relu', return_sequences=True, input_shape=(window_size, len(features))),
    GRU(2, activation = 'relu'),
    Dense(1)
])

# Compile the model
model_GRU_Oil_mini.compile(optimizer='adam', loss='mse')

# Train the model
model_GRU_Oil_mini.fit(X_train_oil, y_train_oil, epochs=50, batch_size=32, validation_split=0.2)

loss_GRU_Oil_mini_train = model_GRU_Oil_mini.evaluate(X_train_oil, y_train_oil)
loss_GRU_Oil_mini_test = model_GRU_Oil_mini.evaluate(X_test_oil, y_test_oil)
print("Train Loss:", loss_GRU_Oil_mini_train)
print("Test Loss:", loss_GRU_Oil_mini_test)

#Predict
pred_oil_train_GRU_mini = model_GRU_Oil_mini.predict(X_train_oil)
pred_oil_train_GRU_mini = pred_oil_train_GRU_mini.reshape(len(pred_oil_train_GRU_mini))

pred_oil_test_GRU_mini = model_GRU_Oil.predict(X_test_oil)
pred_oil_test_GRU_mini = pred_oil_test_GRU.reshape(len(pred_oil_test_GRU_mini))
Epoch 1/50
63/63 [==============================] - 3s 14ms/step - loss: 0.1133 - val_loss: 0.0111
Epoch 2/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0118 - val_loss: 0.0055
Epoch 3/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0047 - val_loss: 0.0017
Epoch 4/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0011 - val_loss: 6.2839e-04
Epoch 5/50
63/63 [==============================] - 0s 7ms/step - loss: 2.5626e-04 - val_loss: 4.3477e-04
Epoch 6/50
63/63 [==============================] - 0s 8ms/step - loss: 1.2890e-04 - val_loss: 4.0206e-04
Epoch 7/50
63/63 [==============================] - 0s 7ms/step - loss: 1.1009e-04 - val_loss: 4.0276e-04
Epoch 8/50
63/63 [==============================] - 0s 7ms/step - loss: 1.0675e-04 - val_loss: 4.0148e-04
Epoch 9/50
63/63 [==============================] - 0s 7ms/step - loss: 1.0495e-04 - val_loss: 3.9978e-04
Epoch 10/50
63/63 [==============================] - 0s 7ms/step - loss: 1.0413e-04 - val_loss: 3.9955e-04
Epoch 11/50
63/63 [==============================] - 0s 7ms/step - loss: 1.0363e-04 - val_loss: 3.9929e-04
Epoch 12/50
63/63 [==============================] - 0s 7ms/step - loss: 1.0300e-04 - val_loss: 3.9948e-04
Epoch 13/50
63/63 [==============================] - 0s 7ms/step - loss: 1.0303e-04 - val_loss: 3.9879e-04
Epoch 14/50
63/63 [==============================] - 0s 7ms/step - loss: 1.0180e-04 - val_loss: 3.9998e-04
Epoch 15/50
63/63 [==============================] - 0s 7ms/step - loss: 1.0145e-04 - val_loss: 4.0053e-04
Epoch 16/50
63/63 [==============================] - 0s 7ms/step - loss: 1.0211e-04 - val_loss: 4.1047e-04
Epoch 17/50
63/63 [==============================] - 0s 7ms/step - loss: 1.0137e-04 - val_loss: 4.0061e-04
Epoch 18/50
63/63 [==============================] - 0s 7ms/step - loss: 1.0088e-04 - val_loss: 3.9940e-04
Epoch 19/50
63/63 [==============================] - 0s 7ms/step - loss: 1.0149e-04 - val_loss: 3.9803e-04
Epoch 20/50
63/63 [==============================] - 1s 8ms/step - loss: 1.0020e-04 - val_loss: 3.9762e-04
Epoch 21/50
63/63 [==============================] - 0s 7ms/step - loss: 9.9759e-05 - val_loss: 3.9738e-04
Epoch 22/50
63/63 [==============================] - 0s 8ms/step - loss: 9.8858e-05 - val_loss: 3.9658e-04
Epoch 23/50
63/63 [==============================] - 1s 10ms/step - loss: 9.8763e-05 - val_loss: 3.9664e-04
Epoch 24/50
63/63 [==============================] - 0s 8ms/step - loss: 9.8097e-05 - val_loss: 3.9649e-04
Epoch 25/50
63/63 [==============================] - 0s 7ms/step - loss: 9.7955e-05 - val_loss: 3.9640e-04
Epoch 26/50
63/63 [==============================] - 0s 7ms/step - loss: 9.9039e-05 - val_loss: 3.9545e-04
Epoch 27/50
63/63 [==============================] - 0s 7ms/step - loss: 9.7266e-05 - val_loss: 3.9542e-04
Epoch 28/50
63/63 [==============================] - 0s 7ms/step - loss: 9.7028e-05 - val_loss: 3.9662e-04
Epoch 29/50
63/63 [==============================] - 0s 7ms/step - loss: 9.6994e-05 - val_loss: 3.9453e-04
Epoch 30/50
63/63 [==============================] - 0s 7ms/step - loss: 9.7009e-05 - val_loss: 3.9511e-04
Epoch 31/50
63/63 [==============================] - 0s 7ms/step - loss: 9.9074e-05 - val_loss: 3.9690e-04
Epoch 32/50
63/63 [==============================] - 0s 8ms/step - loss: 9.8226e-05 - val_loss: 3.9778e-04
Epoch 33/50
63/63 [==============================] - 0s 8ms/step - loss: 9.5911e-05 - val_loss: 3.9627e-04
Epoch 34/50
63/63 [==============================] - 0s 8ms/step - loss: 9.5202e-05 - val_loss: 3.9354e-04
Epoch 35/50
63/63 [==============================] - 0s 7ms/step - loss: 9.4972e-05 - val_loss: 3.9605e-04
Epoch 36/50
63/63 [==============================] - 0s 7ms/step - loss: 9.4923e-05 - val_loss: 3.9072e-04
Epoch 37/50
63/63 [==============================] - 0s 8ms/step - loss: 9.4081e-05 - val_loss: 3.9586e-04
Epoch 38/50
63/63 [==============================] - 0s 7ms/step - loss: 9.6095e-05 - val_loss: 4.0424e-04
Epoch 39/50
63/63 [==============================] - 0s 8ms/step - loss: 9.7331e-05 - val_loss: 3.9113e-04
Epoch 40/50
63/63 [==============================] - 0s 8ms/step - loss: 9.3215e-05 - val_loss: 3.8968e-04
Epoch 41/50
63/63 [==============================] - 0s 7ms/step - loss: 9.4147e-05 - val_loss: 3.8948e-04
Epoch 42/50
63/63 [==============================] - 0s 8ms/step - loss: 9.4329e-05 - val_loss: 3.9005e-04
Epoch 43/50
63/63 [==============================] - 0s 8ms/step - loss: 9.3505e-05 - val_loss: 3.9394e-04
Epoch 44/50
63/63 [==============================] - 0s 7ms/step - loss: 9.3430e-05 - val_loss: 3.9962e-04
Epoch 45/50
63/63 [==============================] - 0s 7ms/step - loss: 9.2564e-05 - val_loss: 3.8696e-04
Epoch 46/50
63/63 [==============================] - 1s 9ms/step - loss: 9.0807e-05 - val_loss: 3.8600e-04
Epoch 47/50
63/63 [==============================] - 0s 7ms/step - loss: 9.2460e-05 - val_loss: 3.8647e-04
Epoch 48/50
63/63 [==============================] - 0s 6ms/step - loss: 9.4249e-05 - val_loss: 3.8530e-04
Epoch 49/50
63/63 [==============================] - 0s 7ms/step - loss: 9.0008e-05 - val_loss: 3.8524e-04
Epoch 50/50
63/63 [==============================] - 0s 7ms/step - loss: 9.0655e-05 - val_loss: 3.8756e-04
79/79 [==============================] - 0s 2ms/step - loss: 1.5159e-04
20/20 [==============================] - 0s 3ms/step - loss: 3.3093e-04
Train Loss: 0.00015158619498834014
Test Loss: 0.0003309290041215718
79/79 [==============================] - 0s 2ms/step
20/20 [==============================] - 0s 4ms/step
In [ ]:
pred_oil_test_GRU = pd.DataFrame(inverse_minmax_scaling(pred_oil_test_GRU, min(daily_oil['Price']), max(daily_oil['Price'])))
pred_oil_train_GRU = pd.DataFrame(inverse_minmax_scaling(pred_oil_train_GRU, min(daily_oil['Price']), max(daily_oil['Price'])))

pred_oil_train_GRU.set_index(y_train_oil_true.index, inplace = True)
pred_oil_test_GRU.set_index(y_test_oil_true.index, inplace = True)
In [ ]:
pred_oil_test_GRU_mini = pd.DataFrame(inverse_minmax_scaling(pred_oil_test_GRU_mini, min(daily_oil['Price']), max(daily_oil['Price'])))
pred_oil_train_GRU_mini = pd.DataFrame(inverse_minmax_scaling(pred_oil_train_GRU_mini, min(daily_oil['Price']), max(daily_oil['Price'])))

pred_oil_train_GRU_mini.set_index(y_train_oil_true.index, inplace = True)
pred_oil_test_GRU_mini.set_index(y_test_oil_true.index, inplace = True)
In [ ]:
# Plotting the actual vs. predicted oil prices for training
plt.figure(figsize=(10, 6))
plt.plot(y_train_oil_true, label='Actual Price', color='blue')
plt.plot(pred_oil_train_GRU, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Oil Prices (Training Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_train_oil_true, pred_oil_train_GRU))
No description has been provided for this image
Root Mean Squared Error (RMSE): 5.094957974193358
In [ ]:
# Plotting the actual vs. predicted oil prices for testing
plt.figure(figsize=(10, 6))
plt.plot(y_test_oil_true, label='Actual Price', color='blue')
plt.plot(pred_oil_test_GRU, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Oil Prices (Testing Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_test_oil_true, pred_oil_test_GRU))
No description has been provided for this image
Root Mean Squared Error (RMSE): 12.2106517858927

Using GRU model to predict stock price¶

Gold related stocks¶

In [ ]:
# Create the predictions
pred_barrick_GRU = model_GRU_Gold.predict(X_barrick)
pred_barrick_GRU = pred_barrick_GRU.reshape(len(pred_barrick_GRU))

pred_barrick_GRU_true = pd.DataFrame(inverse_minmax_scaling(pred_barrick_GRU, min(daily_barrick['Price']), max(daily_barrick['Price'])))

pred_barrick_GRU_true.set_index(y_barrick_true.index, inplace = True)
79/79 [==============================] - 0s 4ms/step
In [ ]:
# Plotting the actual vs. predicted barrick stock prices
plt.figure(figsize=(10, 6))
plt.plot(y_barrick_true, label='Actual Price', color='blue')
plt.plot(pred_barrick_GRU_true, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Barrick Stock Prices')
plt.legend()
plt.show()

print("Root Mean Squared Error of True Data (RMSE):", mean_squared_error(y_barrick_true, pred_barrick_GRU_true))
print("Root Mean Squared Error of Scaled Data(RMSE):", mean_squared_error(y_barrick, pred_barrick_GRU))
No description has been provided for this image
Root Mean Squared Error of True Data (RMSE): 0.30069860622122874
Root Mean Squared Error of Scaled Data(RMSE): 0.0005207436399933778
In [ ]:
# Create the predictions
pred_newmont_GRU = model_GRU_Gold.predict(X_newmont)
pred_newmont_GRU = pred_newmont_GRU.reshape(len(pred_newmont_GRU))

pred_newmont_GRU_true = pd.DataFrame(inverse_minmax_scaling(pred_newmont_GRU, min(daily_newmont['Price']), max(daily_newmont['Price'])))

pred_newmont_GRU_true.set_index(y_newmont_true.index, inplace = True)
79/79 [==============================] - 0s 4ms/step
In [ ]:
# Plotting the actual vs. predicted newmont stock prices
plt.figure(figsize=(10, 6))
plt.plot(y_newmont_true, label='Actual Price', color='blue')
plt.plot(pred_newmont_GRU_true, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Newmont Stock Prices')
plt.legend()
plt.show()

print("Root Mean Squared Error of True Data (RMSE):", mean_squared_error(y_newmont_true, pred_newmont_GRU_true))
print("Root Mean Squared Error (RMSE):", mean_squared_error(y_newmont, pred_newmont_GRU))
No description has been provided for this image
Root Mean Squared Error of True Data (RMSE): 2.146889514625789
Root Mean Squared Error (RMSE): 0.0004350278627880642

Oil related stocks¶

In [ ]:
# Create the predictions
pred_chevron_GRU = model_GRU_Oil.predict(X_chevron)
pred_chevron_GRU = pred_chevron_GRU.reshape(len(pred_chevron_GRU))

pred_chevron_GRU_true = pd.DataFrame(inverse_minmax_scaling(pred_chevron_GRU, min(daily_chevron['Price']), max(daily_chevron['Price'])))

pred_chevron_GRU_true.set_index(y_chevron_true.index, inplace = True)
79/79 [==============================] - 0s 4ms/step
In [ ]:
# Plotting the actual vs. predicted chevron stock prices
plt.figure(figsize=(10, 6))
plt.plot(y_chevron_true, label='Actual Price', color='blue')
plt.plot(pred_chevron_GRU_true, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Chevron Stock Prices')
plt.legend()
plt.show()

print("Root Mean Squared Error of True Data (RMSE):", mean_squared_error(y_chevron_true, pred_chevron_GRU_true))
print("Root Mean Squared Error of Scaled Data (RMSE):", mean_squared_error(y_chevron, pred_chevron_GRU))
No description has been provided for this image
Root Mean Squared Error of True Data (RMSE): 134.48601918569145
Root Mean Squared Error of Scaled Data (RMSE): 0.007508791745994149
In [ ]:
# Create the predictions
pred_exxon_GRU = model_GRU_Oil.predict(X_exxon)
pred_exxon_GRU = pred_exxon_GRU.reshape(len(pred_exxon_GRU))

pred_exxon_GRU_true = pd.DataFrame(inverse_minmax_scaling(pred_exxon_GRU, min(daily_exxon['Price']), max(daily_exxon['Price'])))

pred_exxon_GRU_true.set_index(y_exxon_true.index, inplace = True)
79/79 [==============================] - 0s 4ms/step
In [ ]:
# Plotting the actual vs. predicted exxon stock prices
plt.figure(figsize=(10, 6))
plt.plot(y_exxon_true, label='Actual Price', color='blue')
plt.plot(pred_exxon_GRU_true, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Exxon Stock Prices')
plt.legend()
plt.show()

print("Root Mean Squared Error of True Data (RMSE):", mean_squared_error(y_exxon_true, pred_exxon_GRU_true))
print("Root Mean Squared Error of Scaled Data (RMSE):", mean_squared_error(y_exxon, pred_exxon_GRU))
No description has been provided for this image
Root Mean Squared Error of True Data (RMSE): 54.13942179655557
Root Mean Squared Error of Scaled Data (RMSE): 0.006873482163919057

LSTM¶

Gold¶

In [ ]:
#Gold

# Define the model
model_LSTM_Gold = Sequential([
    LSTM(128, activation = 'relu', return_sequences=True, input_shape=(window_size, len(features))),
    LSTM(64, activation = 'relu', return_sequences=True),
    LSTM(32, activation = 'relu', return_sequences=True),
    LSTM(16, activation = 'relu'),
    Dense(1)
])

# Compile the model
model_LSTM_Gold.compile(optimizer='adam', loss='mse')

# Train the model
model_LSTM_Gold.fit(X_train_gold, y_train_gold, epochs=50, batch_size=32, validation_split=0.2)

# Evaluate the model
loss_LSTM_Gold_train = model_LSTM_Gold.evaluate(X_train_gold, y_train_gold)
loss_LSTM_Gold_test = model_LSTM_Gold.evaluate(X_test_gold, y_test_gold)
print("Train Loss:", loss_LSTM_Gold_train)
print("Test Loss:", loss_LSTM_Gold_test)

#Predict
pred_gold_train_LSTM = model_LSTM_Gold.predict(X_train_gold)
pred_gold_train_LSTM = pred_gold_train_LSTM.reshape(len(pred_gold_train_LSTM))

pred_gold_test_LSTM = model_LSTM_Gold.predict(X_test_gold)
pred_gold_test_LSTM = pred_gold_test_LSTM.reshape(len(pred_gold_test_LSTM))
Epoch 1/50
52/52 [==============================] - 4s 27ms/step - loss: 0.0272 - val_loss: 0.3569
Epoch 2/50
52/52 [==============================] - 1s 17ms/step - loss: 0.0028 - val_loss: 0.1045
Epoch 3/50
52/52 [==============================] - 1s 18ms/step - loss: 9.8052e-04 - val_loss: 0.0538
Epoch 4/50
52/52 [==============================] - 1s 20ms/step - loss: 6.2415e-04 - val_loss: 0.0114
Epoch 5/50
52/52 [==============================] - 1s 18ms/step - loss: 5.1558e-04 - val_loss: 0.0102
Epoch 6/50
52/52 [==============================] - 1s 18ms/step - loss: 4.5810e-04 - val_loss: 0.0026
Epoch 7/50
52/52 [==============================] - 1s 20ms/step - loss: 4.6224e-04 - val_loss: 0.0054
Epoch 8/50
52/52 [==============================] - 1s 17ms/step - loss: 4.0837e-04 - val_loss: 0.0043
Epoch 9/50
52/52 [==============================] - 1s 19ms/step - loss: 4.4833e-04 - val_loss: 0.0028
Epoch 10/50
52/52 [==============================] - 1s 18ms/step - loss: 3.9006e-04 - val_loss: 0.0025
Epoch 11/50
52/52 [==============================] - 1s 18ms/step - loss: 3.8778e-04 - val_loss: 0.0012
Epoch 12/50
52/52 [==============================] - 1s 19ms/step - loss: 4.1060e-04 - val_loss: 0.0016
Epoch 13/50
52/52 [==============================] - 1s 17ms/step - loss: 3.6669e-04 - val_loss: 0.0012
Epoch 14/50
52/52 [==============================] - 1s 18ms/step - loss: 3.4248e-04 - val_loss: 0.0011
Epoch 15/50
52/52 [==============================] - 1s 18ms/step - loss: 3.4665e-04 - val_loss: 0.0018
Epoch 16/50
52/52 [==============================] - 1s 18ms/step - loss: 3.3572e-04 - val_loss: 8.5476e-04
Epoch 17/50
52/52 [==============================] - 1s 18ms/step - loss: 3.1402e-04 - val_loss: 7.7839e-04
Epoch 18/50
52/52 [==============================] - 1s 18ms/step - loss: 3.0866e-04 - val_loss: 0.0025
Epoch 19/50
52/52 [==============================] - 1s 18ms/step - loss: 2.8726e-04 - val_loss: 0.0010
Epoch 20/50
52/52 [==============================] - 1s 19ms/step - loss: 2.9796e-04 - val_loss: 7.5219e-04
Epoch 21/50
52/52 [==============================] - 1s 17ms/step - loss: 2.8979e-04 - val_loss: 5.6535e-04
Epoch 22/50
52/52 [==============================] - 1s 18ms/step - loss: 2.8520e-04 - val_loss: 6.3614e-04
Epoch 23/50
52/52 [==============================] - 1s 18ms/step - loss: 2.6603e-04 - val_loss: 0.0019
Epoch 24/50
52/52 [==============================] - 1s 20ms/step - loss: 2.4894e-04 - val_loss: 5.9503e-04
Epoch 25/50
52/52 [==============================] - 1s 18ms/step - loss: 2.5643e-04 - val_loss: 6.8575e-04
Epoch 26/50
52/52 [==============================] - 1s 18ms/step - loss: 2.4099e-04 - val_loss: 0.0010
Epoch 27/50
52/52 [==============================] - 1s 17ms/step - loss: 2.5045e-04 - val_loss: 0.0022
Epoch 28/50
52/52 [==============================] - 1s 18ms/step - loss: 2.1836e-04 - val_loss: 9.1007e-04
Epoch 29/50
52/52 [==============================] - 1s 18ms/step - loss: 2.1400e-04 - val_loss: 5.3342e-04
Epoch 30/50
52/52 [==============================] - 1s 17ms/step - loss: 2.0181e-04 - val_loss: 5.2313e-04
Epoch 31/50
52/52 [==============================] - 1s 18ms/step - loss: 2.4102e-04 - val_loss: 0.0013
Epoch 32/50
52/52 [==============================] - 1s 19ms/step - loss: 2.3350e-04 - val_loss: 5.5552e-04
Epoch 33/50
52/52 [==============================] - 1s 17ms/step - loss: 2.6307e-04 - val_loss: 6.9162e-04
Epoch 34/50
52/52 [==============================] - 1s 19ms/step - loss: 2.5836e-04 - val_loss: 0.0031
Epoch 35/50
52/52 [==============================] - 1s 17ms/step - loss: 2.5709e-04 - val_loss: 4.3930e-04
Epoch 36/50
52/52 [==============================] - 1s 18ms/step - loss: 1.9485e-04 - val_loss: 6.2504e-04
Epoch 37/50
52/52 [==============================] - 1s 19ms/step - loss: 1.7386e-04 - val_loss: 5.5646e-04
Epoch 38/50
52/52 [==============================] - 1s 16ms/step - loss: 1.9016e-04 - val_loss: 5.3477e-04
Epoch 39/50
52/52 [==============================] - 1s 18ms/step - loss: 2.1392e-04 - val_loss: 6.9945e-04
Epoch 40/50
52/52 [==============================] - 1s 19ms/step - loss: 2.2039e-04 - val_loss: 9.9890e-04
Epoch 41/50
52/52 [==============================] - 1s 18ms/step - loss: 1.9846e-04 - val_loss: 4.1715e-04
Epoch 42/50
52/52 [==============================] - 1s 18ms/step - loss: 1.6759e-04 - val_loss: 5.4972e-04
Epoch 43/50
52/52 [==============================] - 1s 18ms/step - loss: 1.7584e-04 - val_loss: 6.8080e-04
Epoch 44/50
52/52 [==============================] - 1s 17ms/step - loss: 1.8829e-04 - val_loss: 0.0039
Epoch 45/50
52/52 [==============================] - 1s 18ms/step - loss: 1.8286e-04 - val_loss: 5.0191e-04
Epoch 46/50
52/52 [==============================] - 1s 19ms/step - loss: 1.6285e-04 - val_loss: 8.0911e-04
Epoch 47/50
52/52 [==============================] - 1s 19ms/step - loss: 2.0138e-04 - val_loss: 4.4156e-04
Epoch 48/50
52/52 [==============================] - 1s 17ms/step - loss: 1.7134e-04 - val_loss: 0.0011
Epoch 49/50
52/52 [==============================] - 1s 17ms/step - loss: 1.6416e-04 - val_loss: 4.9648e-04
Epoch 50/50
52/52 [==============================] - 1s 18ms/step - loss: 1.6124e-04 - val_loss: 5.2292e-04
65/65 [==============================] - 0s 6ms/step - loss: 2.1212e-04
17/17 [==============================] - 1s 5ms/step - loss: 5.1962e-04
Train Loss: 0.00021211848070379347
Test Loss: 0.0005196167039684951
65/65 [==============================] - 1s 5ms/step
17/17 [==============================] - 1s 7ms/step
In [ ]:
# Mini model
model_LSTM_Gold_mini = Sequential([
    LSTM(3, activation = 'relu', return_sequences=True, input_shape=(window_size, len(features))),
    LSTM(2, activation = 'relu'),
    Dense(1)
])

# Compile the model
model_LSTM_Gold_mini.compile(optimizer='adam', loss='mse')

# Train the model
model_LSTM_Gold_mini.fit(X_train_gold, y_train_gold, epochs=50, batch_size=32, validation_split=0.2)

loss_LSTM_Gold_mini_train = model_LSTM_Gold_mini.evaluate(X_train_gold, y_train_gold)
loss_LSTM_Gold_mini_test = model_LSTM_Gold_mini.evaluate(X_test_gold, y_test_gold)
print("Train Loss:", loss_LSTM_Gold_mini_train)
print("Test Loss:", loss_LSTM_Gold_mini_test)

#Predict
pred_gold_train_LSTM_mini = model_LSTM_Gold_mini.predict(X_train_gold)
pred_gold_train_LSTM_mini = pred_gold_train_LSTM_mini.reshape(len(pred_gold_train_LSTM_mini))

pred_gold_test_LSTM_mini = model_LSTM_Gold.predict(X_test_gold)
pred_gold_test_LSTM_mini = pred_gold_test_LSTM.reshape(len(pred_gold_test_LSTM_mini))
Epoch 1/50
52/52 [==============================] - 2s 11ms/step - loss: 0.0159 - val_loss: 0.1434
Epoch 2/50
52/52 [==============================] - 0s 5ms/step - loss: 0.0071 - val_loss: 0.1130
Epoch 3/50
52/52 [==============================] - 0s 6ms/step - loss: 0.0044 - val_loss: 0.0571
Epoch 4/50
52/52 [==============================] - 0s 5ms/step - loss: 0.0018 - val_loss: 0.0154
Epoch 5/50
52/52 [==============================] - 0s 5ms/step - loss: 4.4746e-04 - val_loss: 0.0017
Epoch 6/50
52/52 [==============================] - 0s 5ms/step - loss: 2.0657e-04 - val_loss: 7.0938e-04
Epoch 7/50
52/52 [==============================] - 0s 6ms/step - loss: 1.9714e-04 - val_loss: 7.3387e-04
Epoch 8/50
52/52 [==============================] - 0s 5ms/step - loss: 1.9259e-04 - val_loss: 7.6704e-04
Epoch 9/50
52/52 [==============================] - 0s 5ms/step - loss: 1.9159e-04 - val_loss: 7.4141e-04
Epoch 10/50
52/52 [==============================] - 0s 6ms/step - loss: 1.8750e-04 - val_loss: 0.0011
Epoch 11/50
52/52 [==============================] - 0s 6ms/step - loss: 1.8678e-04 - val_loss: 0.0013
Epoch 12/50
52/52 [==============================] - 0s 5ms/step - loss: 1.8535e-04 - val_loss: 0.0011
Epoch 13/50
52/52 [==============================] - 0s 5ms/step - loss: 1.8296e-04 - val_loss: 0.0012
Epoch 14/50
52/52 [==============================] - 0s 6ms/step - loss: 1.8135e-04 - val_loss: 0.0013
Epoch 15/50
52/52 [==============================] - 0s 5ms/step - loss: 1.8008e-04 - val_loss: 0.0012
Epoch 16/50
52/52 [==============================] - 0s 5ms/step - loss: 1.7912e-04 - val_loss: 0.0012
Epoch 17/50
52/52 [==============================] - 0s 5ms/step - loss: 1.8019e-04 - val_loss: 0.0013
Epoch 18/50
52/52 [==============================] - 0s 6ms/step - loss: 1.7859e-04 - val_loss: 0.0013
Epoch 19/50
52/52 [==============================] - 0s 5ms/step - loss: 1.7732e-04 - val_loss: 0.0016
Epoch 20/50
52/52 [==============================] - 0s 5ms/step - loss: 1.7662e-04 - val_loss: 0.0011
Epoch 21/50
52/52 [==============================] - 0s 6ms/step - loss: 1.7534e-04 - val_loss: 0.0017
Epoch 22/50
52/52 [==============================] - 0s 5ms/step - loss: 1.8239e-04 - val_loss: 0.0016
Epoch 23/50
52/52 [==============================] - 0s 6ms/step - loss: 1.7535e-04 - val_loss: 0.0014
Epoch 24/50
52/52 [==============================] - 0s 6ms/step - loss: 1.7830e-04 - val_loss: 0.0012
Epoch 25/50
52/52 [==============================] - 0s 5ms/step - loss: 1.7450e-04 - val_loss: 0.0015
Epoch 26/50
52/52 [==============================] - 0s 5ms/step - loss: 1.7370e-04 - val_loss: 0.0015
Epoch 27/50
52/52 [==============================] - 0s 6ms/step - loss: 1.7139e-04 - val_loss: 0.0014
Epoch 28/50
52/52 [==============================] - 0s 5ms/step - loss: 1.7211e-04 - val_loss: 0.0014
Epoch 29/50
52/52 [==============================] - 0s 5ms/step - loss: 1.6920e-04 - val_loss: 0.0014
Epoch 30/50
52/52 [==============================] - 0s 5ms/step - loss: 1.6909e-04 - val_loss: 0.0015
Epoch 31/50
52/52 [==============================] - 0s 6ms/step - loss: 1.6891e-04 - val_loss: 0.0016
Epoch 32/50
52/52 [==============================] - 0s 6ms/step - loss: 1.6854e-04 - val_loss: 0.0012
Epoch 33/50
52/52 [==============================] - 0s 5ms/step - loss: 1.7002e-04 - val_loss: 0.0015
Epoch 34/50
52/52 [==============================] - 0s 6ms/step - loss: 1.6783e-04 - val_loss: 0.0013
Epoch 35/50
52/52 [==============================] - 0s 6ms/step - loss: 1.7509e-04 - val_loss: 0.0011
Epoch 36/50
52/52 [==============================] - 0s 5ms/step - loss: 1.6667e-04 - val_loss: 0.0013
Epoch 37/50
52/52 [==============================] - 0s 5ms/step - loss: 1.6678e-04 - val_loss: 9.9035e-04
Epoch 38/50
52/52 [==============================] - 0s 6ms/step - loss: 1.6665e-04 - val_loss: 0.0019
Epoch 39/50
52/52 [==============================] - 0s 6ms/step - loss: 1.6438e-04 - val_loss: 0.0011
Epoch 40/50
52/52 [==============================] - 0s 6ms/step - loss: 1.6273e-04 - val_loss: 0.0010
Epoch 41/50
52/52 [==============================] - 0s 5ms/step - loss: 1.6070e-04 - val_loss: 0.0016
Epoch 42/50
52/52 [==============================] - 0s 6ms/step - loss: 1.5909e-04 - val_loss: 0.0012
Epoch 43/50
52/52 [==============================] - 0s 5ms/step - loss: 1.6109e-04 - val_loss: 0.0013
Epoch 44/50
52/52 [==============================] - 0s 5ms/step - loss: 1.6013e-04 - val_loss: 0.0016
Epoch 45/50
52/52 [==============================] - 0s 5ms/step - loss: 1.6004e-04 - val_loss: 0.0010
Epoch 46/50
52/52 [==============================] - 0s 6ms/step - loss: 1.6284e-04 - val_loss: 8.4828e-04
Epoch 47/50
52/52 [==============================] - 0s 5ms/step - loss: 1.6022e-04 - val_loss: 6.7616e-04
Epoch 48/50
52/52 [==============================] - 0s 6ms/step - loss: 1.5873e-04 - val_loss: 0.0013
Epoch 49/50
52/52 [==============================] - 0s 5ms/step - loss: 1.5612e-04 - val_loss: 9.2438e-04
Epoch 50/50
52/52 [==============================] - 0s 5ms/step - loss: 1.5481e-04 - val_loss: 9.9510e-04
65/65 [==============================] - 0s 2ms/step - loss: 3.2010e-04
17/17 [==============================] - 0s 3ms/step - loss: 0.0020
Train Loss: 0.00032009510323405266
Test Loss: 0.002044616499915719
65/65 [==============================] - 0s 2ms/step
17/17 [==============================] - 0s 6ms/step
In [ ]:
pred_gold_test_LSTM = pd.DataFrame(inverse_minmax_scaling(pred_gold_test_LSTM, min(daily_gold['Price']), max(daily_gold['Price'])))
pred_gold_train_LSTM = pd.DataFrame(inverse_minmax_scaling(pred_gold_train_LSTM, min(daily_gold['Price']), max(daily_gold['Price'])))

pred_gold_train_LSTM.set_index(y_train_gold_true.index, inplace = True)
pred_gold_test_LSTM.set_index(y_test_gold_true.index, inplace = True)
In [ ]:
pred_gold_test_LSTM_mini = pd.DataFrame(inverse_minmax_scaling(pred_gold_test_LSTM_mini, min(daily_gold['Price']), max(daily_gold['Price'])))
pred_gold_train_LSTM_mini = pd.DataFrame(inverse_minmax_scaling(pred_gold_train_LSTM_mini, min(daily_gold['Price']), max(daily_gold['Price'])))

pred_gold_train_LSTM_mini.set_index(y_train_gold_true.index, inplace = True)
pred_gold_test_LSTM_mini.set_index(y_test_gold_true.index, inplace = True)
In [ ]:
# Plotting the actual vs. predicted gold prices for training
plt.figure(figsize=(10, 6))
plt.plot(y_train_gold_true, label='Actual Price', color='blue')
plt.plot(pred_gold_train_LSTM, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Gold Prices (Training Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_train_gold_true, pred_gold_train_LSTM))
No description has been provided for this image
Root Mean Squared Error (RMSE): 222.97007473918532
In [ ]:
# Plotting the actual vs. predicted gold prices for testing
plt.figure(figsize=(10, 6))
plt.plot(y_test_gold_true, label='Actual Price', color='blue')
plt.plot(pred_gold_test_LSTM, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Gold Prices (Testing Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_test_gold_true, pred_gold_test_LSTM))
No description has been provided for this image
Root Mean Squared Error (RMSE): 546.199361404031

Oil¶

In [ ]:
#Oil

# Define the model
model_LSTM_Oil = Sequential([
    LSTM(128, activation = 'relu', return_sequences=True, input_shape=(window_size, len(features))),
    LSTM(64, activation = 'relu', return_sequences=True),
    LSTM(32, activation = 'relu', return_sequences=True),
    LSTM(16, activation = 'relu'),
    Dense(1)
])

# Compile the model
model_LSTM_Oil.compile(optimizer='adam', loss='mse')

# Train the model
model_LSTM_Oil.fit(X_train_oil, y_train_oil, epochs=50, batch_size=32, validation_split=0.2)

# Evaluate the model
loss_LSTM_Oil_train = model_LSTM_Oil.evaluate(X_train_oil, y_train_oil)
loss_LSTM_Oil_test = model_LSTM_Oil.evaluate(X_test_oil, y_test_oil)
print("Train Loss:", loss_LSTM_Oil_train)
print("Test Loss:", loss_LSTM_Oil_test)

#Predict
pred_oil_train_LSTM = model_LSTM_Oil.predict(X_train_oil)
pred_oil_train_LSTM = pred_oil_train_LSTM.reshape(len(pred_oil_train_LSTM))

pred_oil_test_LSTM = model_LSTM_Oil.predict(X_test_oil)
pred_oil_test_LSTM = pred_oil_test_LSTM.reshape(len(pred_oil_test_LSTM))
Epoch 1/50
63/63 [==============================] - 6s 33ms/step - loss: 0.1976 - val_loss: 0.0011
Epoch 2/50
63/63 [==============================] - 1s 23ms/step - loss: 3.6771e-04 - val_loss: 5.9831e-04
Epoch 3/50
63/63 [==============================] - 1s 22ms/step - loss: 1.9963e-04 - val_loss: 5.9506e-04
Epoch 4/50
63/63 [==============================] - 1s 22ms/step - loss: 1.9534e-04 - val_loss: 5.9661e-04
Epoch 5/50
63/63 [==============================] - 1s 21ms/step - loss: 2.0001e-04 - val_loss: 5.9894e-04
Epoch 6/50
63/63 [==============================] - 1s 21ms/step - loss: 1.9369e-04 - val_loss: 6.0376e-04
Epoch 7/50
63/63 [==============================] - 1s 21ms/step - loss: 1.8983e-04 - val_loss: 5.9851e-04
Epoch 8/50
63/63 [==============================] - 1s 21ms/step - loss: 1.9165e-04 - val_loss: 6.0438e-04
Epoch 9/50
63/63 [==============================] - 1s 21ms/step - loss: 1.9674e-04 - val_loss: 6.1789e-04
Epoch 10/50
63/63 [==============================] - 1s 21ms/step - loss: 1.9315e-04 - val_loss: 6.0454e-04
Epoch 11/50
63/63 [==============================] - 1s 21ms/step - loss: 1.9822e-04 - val_loss: 6.2980e-04
Epoch 12/50
63/63 [==============================] - 1s 21ms/step - loss: 2.0557e-04 - val_loss: 6.2308e-04
Epoch 13/50
63/63 [==============================] - 1s 21ms/step - loss: 1.9537e-04 - val_loss: 6.2784e-04
Epoch 14/50
63/63 [==============================] - 1s 23ms/step - loss: 2.0166e-04 - val_loss: 6.1140e-04
Epoch 15/50
63/63 [==============================] - 1s 21ms/step - loss: 1.9348e-04 - val_loss: 6.2674e-04
Epoch 16/50
63/63 [==============================] - 1s 21ms/step - loss: 2.1345e-04 - val_loss: 6.0631e-04
Epoch 17/50
63/63 [==============================] - 1s 21ms/step - loss: 1.9599e-04 - val_loss: 6.4150e-04
Epoch 18/50
63/63 [==============================] - 1s 22ms/step - loss: 2.0277e-04 - val_loss: 6.1849e-04
Epoch 19/50
63/63 [==============================] - 1s 21ms/step - loss: 2.0357e-04 - val_loss: 6.1109e-04
Epoch 20/50
63/63 [==============================] - 1s 22ms/step - loss: 1.9829e-04 - val_loss: 6.2844e-04
Epoch 21/50
63/63 [==============================] - 1s 21ms/step - loss: 2.1072e-04 - val_loss: 6.4360e-04
Epoch 22/50
63/63 [==============================] - 1s 21ms/step - loss: 2.0545e-04 - val_loss: 6.2465e-04
Epoch 23/50
63/63 [==============================] - 1s 21ms/step - loss: 1.9900e-04 - val_loss: 6.2636e-04
Epoch 24/50
63/63 [==============================] - 1s 21ms/step - loss: 2.0287e-04 - val_loss: 6.1180e-04
Epoch 25/50
63/63 [==============================] - 1s 22ms/step - loss: 2.3346e-04 - val_loss: 6.0855e-04
Epoch 26/50
63/63 [==============================] - 1s 20ms/step - loss: 1.9359e-04 - val_loss: 6.1531e-04
Epoch 27/50
63/63 [==============================] - 1s 21ms/step - loss: 2.2737e-04 - val_loss: 6.9010e-04
Epoch 28/50
63/63 [==============================] - 1s 22ms/step - loss: 2.3248e-04 - val_loss: 6.5371e-04
Epoch 29/50
63/63 [==============================] - 1s 21ms/step - loss: 2.0287e-04 - val_loss: 6.1158e-04
Epoch 30/50
63/63 [==============================] - 1s 21ms/step - loss: 2.8216e-04 - val_loss: 6.2073e-04
Epoch 31/50
63/63 [==============================] - 1s 23ms/step - loss: 2.1688e-04 - val_loss: 6.5789e-04
Epoch 32/50
63/63 [==============================] - 1s 21ms/step - loss: 1.9888e-04 - val_loss: 6.5073e-04
Epoch 33/50
63/63 [==============================] - 1s 21ms/step - loss: 2.3791e-04 - val_loss: 6.4697e-04
Epoch 34/50
63/63 [==============================] - 1s 22ms/step - loss: 2.0405e-04 - val_loss: 6.3067e-04
Epoch 35/50
63/63 [==============================] - 1s 22ms/step - loss: 2.3401e-04 - val_loss: 6.2064e-04
Epoch 36/50
63/63 [==============================] - 1s 22ms/step - loss: 2.0324e-04 - val_loss: 6.3981e-04
Epoch 37/50
63/63 [==============================] - 1s 21ms/step - loss: 2.0081e-04 - val_loss: 6.1841e-04
Epoch 38/50
63/63 [==============================] - 1s 22ms/step - loss: 2.7164e-04 - val_loss: 6.2307e-04
Epoch 39/50
63/63 [==============================] - 1s 22ms/step - loss: 2.2627e-04 - val_loss: 6.6156e-04
Epoch 40/50
63/63 [==============================] - 1s 23ms/step - loss: 2.2068e-04 - val_loss: 6.2872e-04
Epoch 41/50
63/63 [==============================] - 1s 22ms/step - loss: 2.6871e-04 - val_loss: 6.3628e-04
Epoch 42/50
63/63 [==============================] - 1s 22ms/step - loss: 2.7002e-04 - val_loss: 6.6401e-04
Epoch 43/50
63/63 [==============================] - 1s 21ms/step - loss: 2.1305e-04 - val_loss: 6.4398e-04
Epoch 44/50
63/63 [==============================] - 1s 22ms/step - loss: 2.2548e-04 - val_loss: 6.1689e-04
Epoch 45/50
63/63 [==============================] - 1s 21ms/step - loss: 2.2659e-04 - val_loss: 6.0663e-04
Epoch 46/50
63/63 [==============================] - 1s 22ms/step - loss: 2.6558e-04 - val_loss: 6.7181e-04
Epoch 47/50
63/63 [==============================] - 1s 22ms/step - loss: 2.2299e-04 - val_loss: 6.0980e-04
Epoch 48/50
63/63 [==============================] - 1s 23ms/step - loss: 2.1338e-04 - val_loss: 6.2221e-04
Epoch 49/50
63/63 [==============================] - 1s 22ms/step - loss: 2.6112e-04 - val_loss: 6.1160e-04
Epoch 50/50
63/63 [==============================] - 1s 22ms/step - loss: 2.6394e-04 - val_loss: 6.1151e-04
79/79 [==============================] - 0s 6ms/step - loss: 2.9633e-04
20/20 [==============================] - 0s 5ms/step - loss: 7.2082e-04
Train Loss: 0.00029633473604917526
Test Loss: 0.0007208150345832109
79/79 [==============================] - 1s 6ms/step
20/20 [==============================] - 0s 5ms/step
In [ ]:
# Mini model
model_LSTM_Oil_mini = Sequential([
    LSTM(3, activation = 'relu', return_sequences=True, input_shape=(window_size, len(features))),
    LSTM(2, activation = 'relu'),
    Dense(1)
])

# Compile the model
model_LSTM_Oil_mini.compile(optimizer='adam', loss='mse')

# Train the model
model_LSTM_Oil_mini.fit(X_train_oil, y_train_oil, epochs=50, batch_size=32, validation_split=0.2)

loss_LSTM_Oil_mini_train = model_LSTM_Oil_mini.evaluate(X_train_oil, y_train_oil)
loss_LSTM_Oil_mini_test = model_LSTM_Oil_mini.evaluate(X_test_oil, y_test_oil)
print("Train Loss:", loss_LSTM_Oil_mini_train)
print("Test Loss:", loss_LSTM_Oil_mini_test)

#Predict
pred_oil_train_LSTM_mini = model_LSTM_Oil_mini.predict(X_train_oil)
pred_oil_train_LSTM_mini = pred_oil_train_LSTM_mini.reshape(len(pred_oil_train_LSTM_mini))

pred_oil_test_LSTM_mini = model_LSTM_Oil.predict(X_test_oil)
pred_oil_test_LSTM_mini = pred_oil_test_LSTM.reshape(len(pred_oil_test_LSTM_mini))
Epoch 1/50
63/63 [==============================] - 2s 12ms/step - loss: 0.4142 - val_loss: 0.2421
Epoch 2/50
63/63 [==============================] - 0s 7ms/step - loss: 0.3415 - val_loss: 0.1885
Epoch 3/50
63/63 [==============================] - 0s 8ms/step - loss: 0.2795 - val_loss: 0.1441
Epoch 4/50
63/63 [==============================] - 0s 8ms/step - loss: 0.2270 - val_loss: 0.1081
Epoch 5/50
63/63 [==============================] - 0s 8ms/step - loss: 0.1831 - val_loss: 0.0794
Epoch 6/50
63/63 [==============================] - 0s 7ms/step - loss: 0.1469 - val_loss: 0.0570
Epoch 7/50
63/63 [==============================] - 0s 7ms/step - loss: 0.1174 - val_loss: 0.0401
Epoch 8/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0935 - val_loss: 0.0278
Epoch 9/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0747 - val_loss: 0.0191
Epoch 10/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0600 - val_loss: 0.0135
Epoch 11/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0487 - val_loss: 0.0102
Epoch 12/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0402 - val_loss: 0.0086
Epoch 13/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0339 - val_loss: 0.0083
Epoch 14/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0293 - val_loss: 0.0089
Epoch 15/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0261 - val_loss: 0.0100
Epoch 16/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0238 - val_loss: 0.0113
Epoch 17/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0223 - val_loss: 0.0127
Epoch 18/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0213 - val_loss: 0.0142
Epoch 19/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0206 - val_loss: 0.0155
Epoch 20/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0202 - val_loss: 0.0166
Epoch 21/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0199 - val_loss: 0.0176
Epoch 22/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0197 - val_loss: 0.0184
Epoch 23/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0197 - val_loss: 0.0192
Epoch 24/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0196 - val_loss: 0.0196
Epoch 25/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0196 - val_loss: 0.0200
Epoch 26/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0196 - val_loss: 0.0203
Epoch 27/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0195 - val_loss: 0.0205
Epoch 28/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0195 - val_loss: 0.0207
Epoch 29/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0195 - val_loss: 0.0208
Epoch 30/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0195 - val_loss: 0.0208
Epoch 31/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0195 - val_loss: 0.0209
Epoch 32/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0195 - val_loss: 0.0211
Epoch 33/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0195 - val_loss: 0.0210
Epoch 34/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0195 - val_loss: 0.0210
Epoch 35/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0195 - val_loss: 0.0211
Epoch 36/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0195 - val_loss: 0.0211
Epoch 37/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0195 - val_loss: 0.0212
Epoch 38/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0195 - val_loss: 0.0211
Epoch 39/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0195 - val_loss: 0.0210
Epoch 40/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0195 - val_loss: 0.0210
Epoch 41/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0195 - val_loss: 0.0210
Epoch 42/50
63/63 [==============================] - 1s 8ms/step - loss: 0.0195 - val_loss: 0.0212
Epoch 43/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0195 - val_loss: 0.0210
Epoch 44/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0195 - val_loss: 0.0210
Epoch 45/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0195 - val_loss: 0.0211
Epoch 46/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0195 - val_loss: 0.0210
Epoch 47/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0195 - val_loss: 0.0211
Epoch 48/50
63/63 [==============================] - 0s 7ms/step - loss: 0.0195 - val_loss: 0.0211
Epoch 49/50
63/63 [==============================] - 0s 8ms/step - loss: 0.0195 - val_loss: 0.0211
Epoch 50/50
63/63 [==============================] - 1s 8ms/step - loss: 0.0195 - val_loss: 0.0210
79/79 [==============================] - 0s 2ms/step - loss: 0.0198
20/20 [==============================] - 0s 2ms/step - loss: 0.0157
Train Loss: 0.01982816867530346
Test Loss: 0.01568778231739998
79/79 [==============================] - 0s 2ms/step
20/20 [==============================] - 0s 4ms/step
In [ ]:
pred_oil_test_LSTM = pd.DataFrame(inverse_minmax_scaling(pred_oil_test_LSTM, min(daily_oil['Price']), max(daily_oil['Price'])))
pred_oil_train_LSTM = pd.DataFrame(inverse_minmax_scaling(pred_oil_train_LSTM, min(daily_oil['Price']), max(daily_oil['Price'])))

pred_oil_train_LSTM.set_index(y_train_oil_true.index, inplace = True)
pred_oil_test_LSTM.set_index(y_test_oil_true.index, inplace = True)
In [ ]:
pred_oil_test_LSTM_mini = pd.DataFrame(inverse_minmax_scaling(pred_oil_test_LSTM_mini, min(daily_oil['Price']), max(daily_oil['Price'])))
pred_oil_train_LSTM_mini = pd.DataFrame(inverse_minmax_scaling(pred_oil_train_LSTM_mini, min(daily_oil['Price']), max(daily_oil['Price'])))

pred_oil_train_LSTM_mini.set_index(y_train_oil_true.index, inplace = True)
pred_oil_test_LSTM_mini.set_index(y_test_oil_true.index, inplace = True)
In [ ]:
# Plotting the actual vs. predicted oil prices for training
plt.figure(figsize=(10, 6))
plt.plot(y_train_oil_true, label='Actual Price', color='blue')
plt.plot(pred_oil_train_LSTM, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Oil Prices (Training Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_train_oil_true, pred_oil_train_LSTM))
No description has been provided for this image
Root Mean Squared Error (RMSE): 7.712811203861746
In [ ]:
# Plotting the actual vs. predicted oil prices for testing
plt.figure(figsize=(10, 6))
plt.plot(y_test_oil_true, label='Actual Price', color='blue')
plt.plot(pred_oil_test_LSTM, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Oil Prices (Testing Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_test_oil_true, pred_oil_test_LSTM))
No description has been provided for this image
Root Mean Squared Error (RMSE): 18.76091452878497

Using LSTM model to predict stock price¶

Gold related stocks¶

In [ ]:
# Create the predictions
pred_barrick_LSTM = model_LSTM_Gold.predict(X_barrick)
pred_barrick_LSTM = pred_barrick_LSTM.reshape(len(pred_barrick_LSTM))

pred_barrick_LSTM_true = pd.DataFrame(inverse_minmax_scaling(pred_barrick_LSTM, min(daily_barrick['Price']), max(daily_barrick['Price'])))

pred_barrick_LSTM_true.set_index(y_barrick_true.index, inplace = True)
79/79 [==============================] - 0s 6ms/step
In [ ]:
# Plotting the actual vs. predicted barrick stock prices
plt.figure(figsize=(10, 6))
plt.plot(y_barrick_true, label='Actual Price', color='blue')
plt.plot(pred_barrick_LSTM_true, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Barrick Stock Prices')
plt.legend()
plt.show()

print("Root Mean Squared Error of True Data (RMSE):", mean_squared_error(y_barrick_true, pred_barrick_LSTM_true))
print("Root Mean Squared Error of Scaled Data (RMSE):", mean_squared_error(y_barrick, pred_barrick_LSTM))
No description has been provided for this image
Root Mean Squared Error of True Data (RMSE): 0.3378447420500451
Root Mean Squared Error of Scaled Data (RMSE): 0.0005850725658192011
In [ ]:
# Create the predictions
pred_newmont_LSTM = model_LSTM_Gold.predict(X_newmont)
pred_newmont_LSTM = pred_newmont_LSTM.reshape(len(pred_newmont_LSTM))

pred_newmont_LSTM_true = pd.DataFrame(inverse_minmax_scaling(pred_newmont_LSTM, min(daily_newmont['Price']), max(daily_newmont['Price'])))

pred_newmont_LSTM_true.set_index(y_newmont_true.index, inplace = True)
79/79 [==============================] - 0s 5ms/step
In [ ]:
# Plotting the actual vs. predicted gold stock prices
plt.figure(figsize=(10, 6))
plt.plot(y_newmont_true, label='Actual Price', color='blue')
plt.plot(pred_newmont_LSTM_true, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Newmont Stock Prices')
plt.legend()
plt.show()

print("Root Mean Squared Error of True Data (RMSE):", mean_squared_error(y_newmont_true, pred_newmont_LSTM_true))
print("Root Mean Squared Error of Scaled Data (RMSE):", mean_squared_error(y_newmont, pred_newmont_LSTM))
No description has been provided for this image
Root Mean Squared Error of True Data (RMSE): 2.677896752467707
Root Mean Squared Error of Scaled Data (RMSE): 0.0005426267165704885

Oil related stocks¶

In [ ]:
# Create the predictions
pred_chevron_LSTM = model_LSTM_Oil.predict(X_chevron)
pred_chevron_LSTM = pred_chevron_LSTM.reshape(len(pred_chevron_LSTM))

pred_chevron_LSTM_true = pd.DataFrame(inverse_minmax_scaling(pred_chevron_LSTM, min(daily_chevron['Price']), max(daily_chevron['Price'])))

pred_chevron_LSTM_true.set_index(y_chevron_true.index, inplace = True)
79/79 [==============================] - 0s 5ms/step
In [ ]:
# Plotting the actual vs. predicted gold stock prices
plt.figure(figsize=(10, 6))
plt.plot(y_chevron_true, label='Actual Price', color='blue')
plt.plot(pred_chevron_LSTM_true, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Chevron Stock Prices')
plt.legend()
plt.show()

print("Root Mean Squared Error of True Data (RMSE):", mean_squared_error(y_chevron_true, pred_chevron_LSTM_true))
print("Root Mean Squared Error of Scaled Data (RMSE):", mean_squared_error(y_chevron, pred_chevron_LSTM))
No description has been provided for this image
Root Mean Squared Error of True Data (RMSE): 113.48787532277272
Root Mean Squared Error of Scaled Data (RMSE): 0.006336400540029202
In [ ]:
# Create the predictions
pred_exxon_LSTM = model_LSTM_Oil.predict(X_exxon)
pred_exxon_LSTM = pred_exxon_LSTM.reshape(len(pred_exxon_LSTM))

pred_exxon_LSTM_true = pd.DataFrame(inverse_minmax_scaling(pred_exxon_LSTM, min(daily_exxon['Price']), max(daily_exxon['Price'])))

pred_exxon_LSTM_true.set_index(y_exxon_true.index, inplace = True)
79/79 [==============================] - 0s 6ms/step
In [ ]:
# Plotting the actual vs. predicted exxon stock prices
plt.figure(figsize=(10, 6))
plt.plot(y_exxon_true, label='Actual Price', color='blue')
plt.plot(pred_exxon_LSTM_true, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Exxon Stock Prices')
plt.legend()
plt.show()

print("Root Mean Squared Error of True Data (RMSE):", mean_squared_error(y_exxon_true, pred_exxon_LSTM_true))
print("Root Mean Squared Error of Scaled Data (RMSE):", mean_squared_error(y_exxon, pred_exxon_LSTM))
No description has been provided for this image
Root Mean Squared Error of True Data (RMSE): 78.72662729249295
Root Mean Squared Error of Scaled Data (RMSE): 0.009995050136592682
In [ ]:
daily_barrick
Out[ ]:
Price Open High Low Change %
Date
2024-01-11 17.15 17.38 17.49 16.94 -0.011
2024-01-10 17.34 17.44 17.52 17.20 -0.0034
2024-01-09 17.40 17.69 17.70 17.37 -0.0164
2024-01-08 17.69 17.39 17.92 17.31 0.0114
2024-01-05 17.49 17.44 17.72 17.26 0.0081
... ... ... ... ... ...
2014-01-17 18.66 18.34 18.79 18.34 0.0304
2014-01-16 18.11 18.09 18.20 17.85 0.0095
2014-01-15 17.94 17.54 18.03 17.48 0.0136
2014-01-14 17.70 17.99 18.16 17.59 -0.0205
2014-01-13 18.07 18.03 18.10 17.59 -0.0006

2517 rows × 5 columns

In [ ]:
model_RNN_Gold.predict([[
    [16.81, 16.86, 16.61, 0.0134],
    [16.62, 16.76, 16.54, 0.0048],
    [16.56, 16.88, 16.56, -0.0018],
    [16.85, 17.14, 16.80, 0.0185],
    [17.31, 17.31, 16.91, -0.0070]]])
1/1 [==============================] - 0s 27ms/step
Out[ ]:
array([[13.855299]], dtype=float32)
In [ ]:
model_GRU_Gold.predict([[
    [16.81, 16.86, 16.61, 0.0134],
    [16.62, 16.76, 16.54, 0.0048],
    [16.56, 16.88, 16.56, -0.0018],
    [16.85, 17.14, 16.80, 0.0185],
    [17.31, 17.31, 16.91, -0.0070]]])
1/1 [==============================] - 0s 25ms/step
Out[ ]:
array([[13.305249]], dtype=float32)
In [ ]:
model_LSTM_Gold.predict([[
    [16.81, 16.86, 16.61, 0.0134],
    [16.62, 16.76, 16.54, 0.0048],
    [16.56, 16.88, 16.56, -0.0018],
    [16.85, 17.14, 16.80, 0.0185],
    [17.31, 17.31, 16.91, -0.0070]]])
1/1 [==============================] - 0s 25ms/step
Out[ ]:
array([[-24.734232]], dtype=float32)
In [ ]:
daily_barrick_new = pd.read_csv("/work/New/GOLD Historical Data.csv")

daily_barrick_new = daily_barrick_new.drop("Vol.", axis=1)

daily_barrick_new.iloc[:, 1:] = daily_barrick_new.iloc[:, 1:].replace({',': '', '%': ''}, regex=True).astype(float)

daily_barrick_new.iloc[:, -1] *= 0.01

daily_barrick_new['Date'] = pd.to_datetime(daily_barrick_new['Date'], format='%m/%d/%Y', errors='coerce')

daily_barrick_new.set_index('Date', inplace=True)
In [ ]:
daily_barrick_new_scaled = pd.DataFrame()
daily_barrick_new_scaled[['Open', 'High', 'Low', 'Change %','Price']] = scaler.fit_transform(daily_barrick_new[['Open', 'High', 'Low', 'Change %','Price']])
daily_barrick_new_time = daily_barrick_new.index
daily_barrick_new_scaled.set_index(daily_barrick_new_time,inplace = True)
daily_barrick_new_scaled = daily_barrick_new_scaled[::-1]
In [ ]:
X_barrick_new, y_barrick_new = create_sequences(daily_barrick_new_scaled, window_size)
y_barrick_new = pd.DataFrame(y_barrick_new).set_index(daily_barrick_new_scaled.index[window_size:len(daily_barrick_new_scaled)])
In [ ]:
# Create the predictions
pred_barrick_new_RNN = model_RNN_Gold.predict(X_barrick_new)
pred_barrick_new_RNN = pred_barrick_new_RNN.reshape(len(pred_barrick_new_RNN))

y_barrick_new_true = pd.DataFrame(inverse_minmax_scaling(y_barrick_new, min(daily_barrick_new['Price']), max(daily_barrick_new['Price'])))
pred_barrick_new_RNN_true = pd.DataFrame(inverse_minmax_scaling(pred_barrick_new_RNN, min(daily_barrick_new['Price']), max(daily_barrick_new['Price'])))

pred_barrick_new_RNN_true.set_index(y_barrick_new_true.index, inplace = True)
3/3 [==============================] - 0s 3ms/step
In [ ]:
# Plotting the actual vs. predicted barrick_new stock prices
plt.figure(figsize=(10, 6))
plt.plot(y_barrick_new_true, label='Actual Price', color='blue')
plt.plot(pred_barrick_new_RNN_true, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Barricks Stock Prices (Training Data)')
plt.legend()
plt.show()

print("Mean Squared Error of True Data (MSE):", mean_squared_error(y_barrick_new_true, pred_barrick_new_RNN_true))
print("Mean Squared Error of Scaled Data (MSE):", mean_squared_error(y_barrick_new, pred_barrick_new_RNN))
No description has been provided for this image
Mean Squared Error of True Data (MSE): 0.1988411271012848
Mean Squared Error of Scaled Data (MSE): 0.012552797443121322
In [ ]:
# Create the predictions
pred_barrick_new_LSTM = model_LSTM_Gold.predict(X_barrick_new)
pred_barrick_new_LSTM = pred_barrick_new_LSTM.reshape(len(pred_barrick_new_LSTM))

y_barrick_new_true = pd.DataFrame(inverse_minmax_scaling(y_barrick_new, min(daily_barrick_new['Price']), max(daily_barrick_new['Price'])))
pred_barrick_new_LSTM_true = pd.DataFrame(inverse_minmax_scaling(pred_barrick_new_LSTM, min(daily_barrick_new['Price']), max(daily_barrick_new['Price'])))

pred_barrick_new_LSTM_true.set_index(y_barrick_new_true.index, inplace = True)
3/3 [==============================] - 0s 3ms/step
In [ ]:
# Plotting the actual vs. predicted barrick_new stock prices
plt.figure(figsize=(10, 6))
plt.plot(y_barrick_new_true, label='Actual Price', color='blue')
plt.plot(pred_barrick_new_LSTM_true, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Barricks Stock Prices (Training Data)')
plt.legend()
plt.show()

print("Mean Squared Error of True Data (MSE):", mean_squared_error(y_barrick_new_true, pred_barrick_new_LSTM_true))
print("Mean Squared Error of Scaled Data (MSE):", mean_squared_error(y_barrick_new, pred_barrick_new_LSTM))
No description has been provided for this image
Mean Squared Error of True Data (MSE): 0.21600468451718333
Mean Squared Error of Scaled Data (MSE): 0.013636325821911894
In [ ]:
# Create the predictions
pred_barrick_new_GRU = model_GRU_Gold.predict(X_barrick_new)
pred_barrick_new_GRU = pred_barrick_new_GRU.reshape(len(pred_barrick_new_GRU))

y_barrick_new_true = pd.DataFrame(inverse_minmax_scaling(y_barrick_new, min(daily_barrick_new['Price']), max(daily_barrick_new['Price'])))
pred_barrick_new_GRU_true = pd.DataFrame(inverse_minmax_scaling(pred_barrick_new_GRU, min(daily_barrick_new['Price']), max(daily_barrick_new['Price'])))

pred_barrick_new_GRU_true.set_index(y_barrick_new_true.index, inplace = True)
3/3 [==============================] - 0s 3ms/step
In [ ]:
# Plotting the actual vs. predicted barrick_new stock prices
plt.figure(figsize=(10, 6))
plt.plot(pred_barrick_new_GRU_true, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Barricks Stock Prices (Training Data)')
plt.legend()
plt.show()

print("Mean Squared Error of True Data (MSE):", mean_squared_error(y_barrick_new_true, pred_barrick_new_GRU_true))
print("Mean Squared Error of Scaled Data (MSE):", mean_squared_error(y_barrick_new, pred_barrick_new_GRU))
No description has been provided for this image
Mean Squared Error of True Data (MSE): 0.1789201575898233
Mean Squared Error of Scaled Data (MSE): 0.011295188271431191
In [ ]:
# Normalize


# Compute correlations
gold_barrick_correlation = correlate(daily_gold['Price'], daily_barrick['Price'], mode='full')
gold_newmont_correlation = correlate(daily_gold['Price'], daily_newmont['Price'], mode='full')
oil_chevron_correlation = correlate(daily_oil['Price'], daily_chevron['Price'], mode='full')
oil_exxon_correlation = correlate(daily_oil['Price'], daily_exxon['Price'], mode='full')

# Compute lags
lags_gold_barrick = np.arange(-len(daily_gold['Price']) + 1, len(daily_barrick['Price']))
lags_gold_newmont = np.arange(-len(daily_gold['Price']) + 1, len(daily_newmont['Price']))
lags_oil_chevron = np.arange(-len(daily_oil['Price']) + 1, len(daily_chevron['Price']))
lags_oil_exxon = np.arange(-len(daily_oil['Price']) + 1, len(daily_exxon['Price']))

# Create subplots
fig, axs = plt.subplots(2, 2)

# Plot each correlation on its respective subplot
axs[0, 0].plot(lags_gold_barrick, gold_barrick_correlation)
axs[0, 0].set_title('Gold-Barrick Correlation')
axs[0, 0].set_xlabel('Lag')
axs[0, 0].set_ylabel('Cross-Correlation')
axs[0, 0].grid(True)

axs[0, 1].plot(lags_gold_newmont, gold_newmont_correlation)
axs[0, 1].set_title('Gold-Newmont Correlation')
axs[0, 1].set_xlabel('Lag')
axs[0, 1].set_ylabel('Cross-Correlation')
axs[0, 1].grid(True)

axs[1, 0].plot(lags_oil_chevron, oil_chevron_correlation)
axs[1, 0].set_title('Oil-Chevron Correlation')
axs[1, 0].set_xlabel('Lag')
axs[1, 0].set_ylabel('Cross-Correlation')
axs[1, 0].grid(True)

axs[1, 1].plot(lags_oil_exxon, oil_exxon_correlation)
axs[1, 1].set_title('Oil-Exxon Correlation')
axs[1, 1].set_xlabel('Lag')
axs[1, 1].set_ylabel('Cross-Correlation')
axs[1, 1].grid(True)

# Adjust layout to prevent overlap
plt.tight_layout()

# Show the plot
plt.show()
No description has been provided for this image

Modifying Phase¶

In [ ]:
# RNN 

# Gold

# Same layer amount, less neurons

# Define the model
model_RNN_Gold_revamp_1 = Sequential([
    SimpleRNN(64, activation = 'relu', return_sequences=True, input_shape=(window_size, len(features))),
    SimpleRNN(32, activation = 'relu', return_sequences=True),
    SimpleRNN(16, activation = 'relu', return_sequences=True),
    SimpleRNN(8, activation = 'relu'),
    Dense(1)
])

# Compile the model
model_RNN_Gold_revamp_1.compile(optimizer='adam', loss='mse')

# Train the model
model_RNN_Gold_revamp_1.fit(X_train_gold, y_train_gold, epochs=100, batch_size=32, validation_split=0.2)

# Evaluate the model
loss_RNN_Gold_train_revamp_1 = model_RNN_Gold_revamp_1.evaluate(X_train_gold, y_train_gold)
loss_RNN_Gold_test_revamp_1 = model_RNN_Gold_revamp_1.evaluate(X_test_gold, y_test_gold)
print("Train Loss:", loss_RNN_Gold_train_revamp_1)
print("Test Loss:", loss_RNN_Gold_test_revamp_1)

#Predict
pred_gold_train_RNN_revamp_1 = model_RNN_Gold_revamp_1.predict(X_train_gold)
pred_gold_train_RNN_revamp_1 = pred_gold_train_RNN_revamp_1.reshape(len(pred_gold_train_RNN_revamp_1))

pred_gold_test_RNN_revamp_1 = model_RNN_Gold_revamp_1.predict(X_test_gold)
pred_gold_test_RNN_revamp_1 = pred_gold_test_RNN_revamp_1.reshape(len(pred_gold_test_RNN_revamp_1))
Epoch 1/100
52/52 [==============================] - 2s 15ms/step - loss: 0.0030 - val_loss: 0.0011
Epoch 2/100
52/52 [==============================] - 0s 7ms/step - loss: 3.7750e-04 - val_loss: 0.0014
Epoch 3/100
52/52 [==============================] - 0s 8ms/step - loss: 2.7064e-04 - val_loss: 5.8901e-04
Epoch 4/100
52/52 [==============================] - 0s 8ms/step - loss: 2.1784e-04 - val_loss: 5.8468e-04
Epoch 5/100
52/52 [==============================] - 0s 8ms/step - loss: 1.9360e-04 - val_loss: 4.9176e-04
Epoch 6/100
52/52 [==============================] - 0s 7ms/step - loss: 1.8854e-04 - val_loss: 7.7613e-04
Epoch 7/100
52/52 [==============================] - 0s 7ms/step - loss: 1.7613e-04 - val_loss: 4.5694e-04
Epoch 8/100
52/52 [==============================] - 0s 8ms/step - loss: 1.6395e-04 - val_loss: 9.3578e-04
Epoch 9/100
52/52 [==============================] - 0s 8ms/step - loss: 1.4513e-04 - val_loss: 6.5197e-04
Epoch 10/100
52/52 [==============================] - 0s 7ms/step - loss: 2.0966e-04 - val_loss: 4.5713e-04
Epoch 11/100
52/52 [==============================] - 0s 7ms/step - loss: 1.5820e-04 - val_loss: 4.1997e-04
Epoch 12/100
52/52 [==============================] - 0s 7ms/step - loss: 1.5691e-04 - val_loss: 7.2759e-04
Epoch 13/100
52/52 [==============================] - 0s 7ms/step - loss: 1.4955e-04 - val_loss: 7.3522e-04
Epoch 14/100
52/52 [==============================] - 0s 7ms/step - loss: 1.3976e-04 - val_loss: 4.3610e-04
Epoch 15/100
52/52 [==============================] - 0s 7ms/step - loss: 1.4325e-04 - val_loss: 4.7652e-04
Epoch 16/100
52/52 [==============================] - 0s 7ms/step - loss: 1.4168e-04 - val_loss: 6.2980e-04
Epoch 17/100
52/52 [==============================] - 0s 7ms/step - loss: 1.3437e-04 - val_loss: 5.6721e-04
Epoch 18/100
52/52 [==============================] - 0s 7ms/step - loss: 1.3778e-04 - val_loss: 3.8782e-04
Epoch 19/100
52/52 [==============================] - 0s 8ms/step - loss: 1.2859e-04 - val_loss: 8.1174e-04
Epoch 20/100
52/52 [==============================] - 0s 8ms/step - loss: 1.2584e-04 - val_loss: 4.7341e-04
Epoch 21/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3502e-04 - val_loss: 8.0608e-04
Epoch 22/100
52/52 [==============================] - 0s 8ms/step - loss: 1.4224e-04 - val_loss: 4.5182e-04
Epoch 23/100
52/52 [==============================] - 0s 8ms/step - loss: 1.4176e-04 - val_loss: 5.4114e-04
Epoch 24/100
52/52 [==============================] - 0s 8ms/step - loss: 1.4563e-04 - val_loss: 4.4225e-04
Epoch 25/100
52/52 [==============================] - 0s 8ms/step - loss: 1.5799e-04 - val_loss: 4.4436e-04
Epoch 26/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3404e-04 - val_loss: 4.2894e-04
Epoch 27/100
52/52 [==============================] - 0s 9ms/step - loss: 1.4029e-04 - val_loss: 4.9854e-04
Epoch 28/100
52/52 [==============================] - 0s 7ms/step - loss: 1.3010e-04 - val_loss: 8.8597e-04
Epoch 29/100
52/52 [==============================] - 0s 8ms/step - loss: 1.7118e-04 - val_loss: 0.0010
Epoch 30/100
52/52 [==============================] - 0s 8ms/step - loss: 1.5390e-04 - val_loss: 7.7615e-04
Epoch 31/100
52/52 [==============================] - 0s 8ms/step - loss: 1.2824e-04 - val_loss: 4.4276e-04
Epoch 32/100
52/52 [==============================] - 0s 8ms/step - loss: 1.2838e-04 - val_loss: 4.3946e-04
Epoch 33/100
52/52 [==============================] - 0s 8ms/step - loss: 1.2843e-04 - val_loss: 5.1398e-04
Epoch 34/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3861e-04 - val_loss: 7.3534e-04
Epoch 35/100
52/52 [==============================] - 0s 8ms/step - loss: 1.7103e-04 - val_loss: 0.0011
Epoch 36/100
52/52 [==============================] - 0s 7ms/step - loss: 1.4986e-04 - val_loss: 5.1438e-04
Epoch 37/100
52/52 [==============================] - 0s 7ms/step - loss: 1.3860e-04 - val_loss: 4.9288e-04
Epoch 38/100
52/52 [==============================] - 0s 7ms/step - loss: 1.3202e-04 - val_loss: 4.6321e-04
Epoch 39/100
52/52 [==============================] - 0s 7ms/step - loss: 1.2199e-04 - val_loss: 0.0014
Epoch 40/100
52/52 [==============================] - 0s 7ms/step - loss: 1.5911e-04 - val_loss: 6.2026e-04
Epoch 41/100
52/52 [==============================] - 0s 7ms/step - loss: 1.4222e-04 - val_loss: 6.9826e-04
Epoch 42/100
52/52 [==============================] - 0s 8ms/step - loss: 1.2700e-04 - val_loss: 4.5063e-04
Epoch 43/100
52/52 [==============================] - 0s 8ms/step - loss: 1.2242e-04 - val_loss: 7.0126e-04
Epoch 44/100
52/52 [==============================] - 0s 8ms/step - loss: 1.2833e-04 - val_loss: 8.6754e-04
Epoch 45/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3294e-04 - val_loss: 4.9048e-04
Epoch 46/100
52/52 [==============================] - 0s 8ms/step - loss: 1.2628e-04 - val_loss: 4.5758e-04
Epoch 47/100
52/52 [==============================] - 0s 8ms/step - loss: 1.4441e-04 - val_loss: 0.0014
Epoch 48/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3961e-04 - val_loss: 8.7272e-04
Epoch 49/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3447e-04 - val_loss: 5.8576e-04
Epoch 50/100
52/52 [==============================] - 0s 8ms/step - loss: 1.6756e-04 - val_loss: 9.3221e-04
Epoch 51/100
52/52 [==============================] - 0s 8ms/step - loss: 1.1932e-04 - val_loss: 4.4127e-04
Epoch 52/100
52/52 [==============================] - 0s 9ms/step - loss: 1.2465e-04 - val_loss: 5.2823e-04
Epoch 53/100
52/52 [==============================] - 0s 7ms/step - loss: 1.3951e-04 - val_loss: 5.7103e-04
Epoch 54/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3085e-04 - val_loss: 7.4860e-04
Epoch 55/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3569e-04 - val_loss: 4.3840e-04
Epoch 56/100
52/52 [==============================] - 0s 8ms/step - loss: 1.2104e-04 - val_loss: 4.9323e-04
Epoch 57/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3585e-04 - val_loss: 7.4050e-04
Epoch 58/100
52/52 [==============================] - 0s 7ms/step - loss: 1.5140e-04 - val_loss: 6.4353e-04
Epoch 59/100
52/52 [==============================] - 0s 7ms/step - loss: 1.5255e-04 - val_loss: 6.0384e-04
Epoch 60/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3592e-04 - val_loss: 0.0013
Epoch 61/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3229e-04 - val_loss: 9.6320e-04
Epoch 62/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3720e-04 - val_loss: 7.0430e-04
Epoch 63/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3017e-04 - val_loss: 6.9850e-04
Epoch 64/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3568e-04 - val_loss: 6.8685e-04
Epoch 65/100
52/52 [==============================] - 0s 8ms/step - loss: 1.4671e-04 - val_loss: 8.7238e-04
Epoch 66/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3757e-04 - val_loss: 5.7360e-04
Epoch 67/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3713e-04 - val_loss: 5.4820e-04
Epoch 68/100
52/52 [==============================] - 0s 9ms/step - loss: 1.2198e-04 - val_loss: 8.5958e-04
Epoch 69/100
52/52 [==============================] - 0s 7ms/step - loss: 1.2724e-04 - val_loss: 0.0020
Epoch 70/100
52/52 [==============================] - 0s 8ms/step - loss: 1.4816e-04 - val_loss: 5.2325e-04
Epoch 71/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3250e-04 - val_loss: 5.0955e-04
Epoch 72/100
52/52 [==============================] - 0s 7ms/step - loss: 1.3656e-04 - val_loss: 6.9488e-04
Epoch 73/100
52/52 [==============================] - 0s 7ms/step - loss: 1.2935e-04 - val_loss: 6.8371e-04
Epoch 74/100
52/52 [==============================] - 0s 7ms/step - loss: 1.2559e-04 - val_loss: 6.6757e-04
Epoch 75/100
52/52 [==============================] - 0s 7ms/step - loss: 1.3880e-04 - val_loss: 5.9797e-04
Epoch 76/100
52/52 [==============================] - 0s 7ms/step - loss: 1.3763e-04 - val_loss: 8.4695e-04
Epoch 77/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3373e-04 - val_loss: 5.4790e-04
Epoch 78/100
52/52 [==============================] - 0s 8ms/step - loss: 1.2276e-04 - val_loss: 6.5321e-04
Epoch 79/100
52/52 [==============================] - 0s 8ms/step - loss: 1.2166e-04 - val_loss: 0.0011
Epoch 80/100
52/52 [==============================] - 0s 8ms/step - loss: 1.2732e-04 - val_loss: 5.5752e-04
Epoch 81/100
52/52 [==============================] - 0s 8ms/step - loss: 1.2755e-04 - val_loss: 7.3407e-04
Epoch 82/100
52/52 [==============================] - 0s 8ms/step - loss: 1.4742e-04 - val_loss: 0.0011
Epoch 83/100
52/52 [==============================] - 0s 8ms/step - loss: 1.2662e-04 - val_loss: 4.6915e-04
Epoch 84/100
52/52 [==============================] - 0s 9ms/step - loss: 1.2972e-04 - val_loss: 7.2835e-04
Epoch 85/100
52/52 [==============================] - 0s 7ms/step - loss: 1.2972e-04 - val_loss: 5.8496e-04
Epoch 86/100
52/52 [==============================] - 0s 9ms/step - loss: 1.3990e-04 - val_loss: 5.2053e-04
Epoch 87/100
52/52 [==============================] - 0s 7ms/step - loss: 1.2551e-04 - val_loss: 7.4098e-04
Epoch 88/100
52/52 [==============================] - 0s 8ms/step - loss: 1.1829e-04 - val_loss: 7.1281e-04
Epoch 89/100
52/52 [==============================] - 0s 8ms/step - loss: 1.3248e-04 - val_loss: 5.1436e-04
Epoch 90/100
52/52 [==============================] - 0s 7ms/step - loss: 1.3170e-04 - val_loss: 5.9602e-04
Epoch 91/100
52/52 [==============================] - 0s 7ms/step - loss: 1.4456e-04 - val_loss: 4.6901e-04
Epoch 92/100
52/52 [==============================] - 0s 7ms/step - loss: 1.3948e-04 - val_loss: 7.8865e-04
Epoch 93/100
52/52 [==============================] - 0s 8ms/step - loss: 1.4888e-04 - val_loss: 6.6084e-04
Epoch 94/100
52/52 [==============================] - 0s 7ms/step - loss: 1.3243e-04 - val_loss: 5.7254e-04
Epoch 95/100
52/52 [==============================] - 0s 7ms/step - loss: 1.3672e-04 - val_loss: 5.8638e-04
Epoch 96/100
52/52 [==============================] - 0s 7ms/step - loss: 1.2281e-04 - val_loss: 7.0927e-04
Epoch 97/100
52/52 [==============================] - 0s 7ms/step - loss: 1.3752e-04 - val_loss: 5.8364e-04
Epoch 98/100
52/52 [==============================] - 0s 7ms/step - loss: 1.2343e-04 - val_loss: 5.2904e-04
Epoch 99/100
52/52 [==============================] - 0s 7ms/step - loss: 1.2352e-04 - val_loss: 6.5065e-04
Epoch 100/100
52/52 [==============================] - 0s 7ms/step - loss: 1.1733e-04 - val_loss: 6.4456e-04
65/65 [==============================] - 0s 2ms/step - loss: 2.2026e-04
17/17 [==============================] - 0s 3ms/step - loss: 5.1528e-04
Train Loss: 0.00022025506768841296
Test Loss: 0.000515282095875591
65/65 [==============================] - 0s 2ms/step
17/17 [==============================] - 0s 2ms/step
In [ ]:
pred_gold_test_RNN_revamp_1 = pd.DataFrame(inverse_minmax_scaling(pred_gold_test_RNN_revamp_1, min(daily_gold['Price']), max(daily_gold['Price'])))
pred_gold_train_RNN_revamp_1 = pd.DataFrame(inverse_minmax_scaling(pred_gold_train_RNN_revamp_1, min(daily_gold['Price']), max(daily_gold['Price'])))

pred_gold_train_RNN_revamp_1.set_index(y_train_gold_true.index, inplace = True)
pred_gold_test_RNN_revamp_1.set_index(y_test_gold_true.index, inplace = True)
In [ ]:
# Plotting the actual vs. predicted gold prices for training
plt.figure(figsize=(10, 6))
plt.plot(y_train_gold_true, label='Actual Price', color='blue')
plt.plot(pred_gold_train_RNN_revamp_1, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Gold Prices (Training Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_train_gold_true, pred_gold_train_RNN_revamp_1))
No description has been provided for this image
Root Mean Squared Error (RMSE): 231.52286383192916
In [ ]:
# Plotting the actual vs. predicted gold prices for testing
plt.figure(figsize=(10, 6))
plt.plot(y_test_gold_true, label='Actual Price', color='blue')
plt.plot(pred_gold_test_RNN_revamp_1, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Gold Prices (Testing Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_test_gold_true, pred_gold_test_RNN_revamp_1))
No description has been provided for this image
Root Mean Squared Error (RMSE): 541.6430327994968
In [ ]:
 
In [ ]:
# RNN 

# Oil

# Same layer amount, less neurons

# Define the model
model_RNN_Oil_revamp_1 = Sequential([
    SimpleRNN(64, activation = 'relu', return_sequences=True, input_shape=(window_size, len(features))),
    SimpleRNN(32, activation = 'relu', return_sequences=True),
    SimpleRNN(16, activation = 'relu', return_sequences=True),
    SimpleRNN(8, activation = 'relu'),
    Dense(1)
])

# Compile the model
model_RNN_Oil_revamp_1.compile(optimizer='adam', loss='mse')

# Train the model
model_RNN_Oil_revamp_1.fit(X_train_oil, y_train_oil, epochs=100, batch_size=32, validation_split=0.2)

# Evaluate the model
loss_RNN_Oil_train_revamp_1 = model_RNN_Oil_revamp_1.evaluate(X_train_oil, y_train_oil)
loss_RNN_Oil_test_revamp_1 = model_RNN_Oil_revamp_1.evaluate(X_test_oil, y_test_oil)
print("Train Loss:", loss_RNN_Oil_train_revamp_1)
print("Test Loss:", loss_RNN_Oil_test_revamp_1)

#Predict
pred_oil_train_RNN_revamp_1 = model_RNN_Oil_revamp_1.predict(X_train_oil)
pred_oil_train_RNN_revamp_1 = pred_oil_train_RNN_revamp_1.reshape(len(pred_oil_train_RNN_revamp_1))

pred_oil_test_RNN_revamp_1 = model_RNN_Oil_revamp_1.predict(X_test_oil)
pred_oil_test_RNN_revamp_1 = pred_oil_test_RNN_revamp_1.reshape(len(pred_oil_test_RNN_revamp_1))
Epoch 1/100
63/63 [==============================] - 3s 15ms/step - loss: 0.0638 - val_loss: 6.7232e-04
Epoch 2/100
63/63 [==============================] - 1s 10ms/step - loss: 2.3840e-04 - val_loss: 5.4754e-04
Epoch 3/100
63/63 [==============================] - 1s 11ms/step - loss: 1.5662e-04 - val_loss: 5.4554e-04
Epoch 4/100
63/63 [==============================] - 1s 9ms/step - loss: 1.5248e-04 - val_loss: 5.5798e-04
Epoch 5/100
63/63 [==============================] - 1s 10ms/step - loss: 1.5357e-04 - val_loss: 5.3967e-04
Epoch 6/100
63/63 [==============================] - 1s 10ms/step - loss: 1.4882e-04 - val_loss: 5.3584e-04
Epoch 7/100
63/63 [==============================] - 1s 10ms/step - loss: 1.4750e-04 - val_loss: 5.2515e-04
Epoch 8/100
63/63 [==============================] - 1s 10ms/step - loss: 1.5064e-04 - val_loss: 5.2679e-04
Epoch 9/100
63/63 [==============================] - 1s 10ms/step - loss: 1.4779e-04 - val_loss: 5.1946e-04
Epoch 10/100
63/63 [==============================] - 1s 10ms/step - loss: 1.5009e-04 - val_loss: 5.1850e-04
Epoch 11/100
63/63 [==============================] - 1s 10ms/step - loss: 1.4703e-04 - val_loss: 5.2555e-04
Epoch 12/100
63/63 [==============================] - 1s 10ms/step - loss: 1.5513e-04 - val_loss: 5.1089e-04
Epoch 13/100
63/63 [==============================] - 1s 10ms/step - loss: 1.4607e-04 - val_loss: 5.2049e-04
Epoch 14/100
63/63 [==============================] - 1s 9ms/step - loss: 1.4589e-04 - val_loss: 5.0663e-04
Epoch 15/100
63/63 [==============================] - 1s 10ms/step - loss: 1.4202e-04 - val_loss: 5.1170e-04
Epoch 16/100
63/63 [==============================] - 1s 10ms/step - loss: 1.4527e-04 - val_loss: 5.0634e-04
Epoch 17/100
63/63 [==============================] - 1s 10ms/step - loss: 1.4084e-04 - val_loss: 5.0880e-04
Epoch 18/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3713e-04 - val_loss: 5.1952e-04
Epoch 19/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3934e-04 - val_loss: 5.0939e-04
Epoch 20/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3615e-04 - val_loss: 5.0017e-04
Epoch 21/100
63/63 [==============================] - 1s 10ms/step - loss: 1.4870e-04 - val_loss: 4.9616e-04
Epoch 22/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3167e-04 - val_loss: 4.9043e-04
Epoch 23/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3189e-04 - val_loss: 4.9529e-04
Epoch 24/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3458e-04 - val_loss: 4.9184e-04
Epoch 25/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3922e-04 - val_loss: 4.8486e-04
Epoch 26/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3100e-04 - val_loss: 4.9630e-04
Epoch 27/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3323e-04 - val_loss: 5.2754e-04
Epoch 28/100
63/63 [==============================] - 1s 11ms/step - loss: 1.3409e-04 - val_loss: 4.8457e-04
Epoch 29/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3314e-04 - val_loss: 4.8364e-04
Epoch 30/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3296e-04 - val_loss: 4.9547e-04
Epoch 31/100
63/63 [==============================] - 1s 10ms/step - loss: 1.4074e-04 - val_loss: 4.7654e-04
Epoch 32/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3838e-04 - val_loss: 4.7515e-04
Epoch 33/100
63/63 [==============================] - 1s 10ms/step - loss: 1.2661e-04 - val_loss: 4.7525e-04
Epoch 34/100
63/63 [==============================] - 1s 10ms/step - loss: 1.2641e-04 - val_loss: 5.0910e-04
Epoch 35/100
63/63 [==============================] - 1s 9ms/step - loss: 1.3347e-04 - val_loss: 4.8137e-04
Epoch 36/100
63/63 [==============================] - 1s 10ms/step - loss: 1.2257e-04 - val_loss: 4.7089e-04
Epoch 37/100
63/63 [==============================] - 1s 11ms/step - loss: 1.2263e-04 - val_loss: 4.6779e-04
Epoch 38/100
63/63 [==============================] - 1s 10ms/step - loss: 1.4019e-04 - val_loss: 4.6970e-04
Epoch 39/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3099e-04 - val_loss: 4.9983e-04
Epoch 40/100
63/63 [==============================] - 1s 10ms/step - loss: 1.2248e-04 - val_loss: 4.6924e-04
Epoch 41/100
63/63 [==============================] - 1s 10ms/step - loss: 1.2480e-04 - val_loss: 4.6398e-04
Epoch 42/100
63/63 [==============================] - 1s 11ms/step - loss: 1.2861e-04 - val_loss: 4.6312e-04
Epoch 43/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3804e-04 - val_loss: 4.6657e-04
Epoch 44/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3601e-04 - val_loss: 4.7045e-04
Epoch 45/100
63/63 [==============================] - 1s 10ms/step - loss: 1.2616e-04 - val_loss: 4.8803e-04
Epoch 46/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3072e-04 - val_loss: 4.7181e-04
Epoch 47/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3521e-04 - val_loss: 4.9072e-04
Epoch 48/100
63/63 [==============================] - 1s 10ms/step - loss: 1.2510e-04 - val_loss: 4.5651e-04
Epoch 49/100
63/63 [==============================] - 1s 10ms/step - loss: 1.2278e-04 - val_loss: 4.5327e-04
Epoch 50/100
63/63 [==============================] - 1s 10ms/step - loss: 1.1469e-04 - val_loss: 4.4990e-04
Epoch 51/100
63/63 [==============================] - 1s 10ms/step - loss: 1.2071e-04 - val_loss: 5.0205e-04
Epoch 52/100
63/63 [==============================] - 1s 11ms/step - loss: 1.1646e-04 - val_loss: 4.5441e-04
Epoch 53/100
63/63 [==============================] - 1s 10ms/step - loss: 1.2111e-04 - val_loss: 4.5300e-04
Epoch 54/100
63/63 [==============================] - 1s 10ms/step - loss: 1.1977e-04 - val_loss: 4.4914e-04
Epoch 55/100
63/63 [==============================] - 1s 10ms/step - loss: 1.2912e-04 - val_loss: 5.1459e-04
Epoch 56/100
63/63 [==============================] - 1s 10ms/step - loss: 1.1183e-04 - val_loss: 4.5036e-04
Epoch 57/100
63/63 [==============================] - 1s 11ms/step - loss: 1.2879e-04 - val_loss: 4.5781e-04
Epoch 58/100
63/63 [==============================] - 1s 9ms/step - loss: 1.1848e-04 - val_loss: 4.4768e-04
Epoch 59/100
63/63 [==============================] - 1s 10ms/step - loss: 1.1764e-04 - val_loss: 4.5664e-04
Epoch 60/100
63/63 [==============================] - 1s 11ms/step - loss: 1.1461e-04 - val_loss: 4.5736e-04
Epoch 61/100
63/63 [==============================] - 1s 10ms/step - loss: 1.4129e-04 - val_loss: 4.4441e-04
Epoch 62/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3415e-04 - val_loss: 4.5507e-04
Epoch 63/100
63/63 [==============================] - 1s 10ms/step - loss: 1.1661e-04 - val_loss: 4.6544e-04
Epoch 64/100
63/63 [==============================] - 1s 10ms/step - loss: 1.1652e-04 - val_loss: 4.3905e-04
Epoch 65/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3683e-04 - val_loss: 4.6446e-04
Epoch 66/100
63/63 [==============================] - 1s 10ms/step - loss: 1.1528e-04 - val_loss: 4.3564e-04
Epoch 67/100
63/63 [==============================] - 1s 10ms/step - loss: 1.1079e-04 - val_loss: 4.3691e-04
Epoch 68/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3547e-04 - val_loss: 4.5128e-04
Epoch 69/100
63/63 [==============================] - 1s 12ms/step - loss: 1.1757e-04 - val_loss: 4.8894e-04
Epoch 70/100
63/63 [==============================] - 1s 9ms/step - loss: 1.3676e-04 - val_loss: 4.3932e-04
Epoch 71/100
63/63 [==============================] - 1s 10ms/step - loss: 1.1054e-04 - val_loss: 4.3898e-04
Epoch 72/100
63/63 [==============================] - 1s 10ms/step - loss: 1.0139e-04 - val_loss: 4.6572e-04
Epoch 73/100
63/63 [==============================] - 1s 10ms/step - loss: 1.1956e-04 - val_loss: 4.6967e-04
Epoch 74/100
63/63 [==============================] - 1s 10ms/step - loss: 1.0036e-04 - val_loss: 4.3061e-04
Epoch 75/100
63/63 [==============================] - 1s 10ms/step - loss: 1.0581e-04 - val_loss: 4.5374e-04
Epoch 76/100
63/63 [==============================] - 1s 10ms/step - loss: 1.0402e-04 - val_loss: 4.3596e-04
Epoch 77/100
63/63 [==============================] - 1s 12ms/step - loss: 1.0839e-04 - val_loss: 4.4327e-04
Epoch 78/100
63/63 [==============================] - 1s 9ms/step - loss: 1.0812e-04 - val_loss: 4.5889e-04
Epoch 79/100
63/63 [==============================] - 1s 10ms/step - loss: 1.2473e-04 - val_loss: 4.5481e-04
Epoch 80/100
63/63 [==============================] - 1s 10ms/step - loss: 1.1652e-04 - val_loss: 4.5988e-04
Epoch 81/100
63/63 [==============================] - 1s 10ms/step - loss: 9.5181e-05 - val_loss: 4.7742e-04
Epoch 82/100
63/63 [==============================] - 1s 10ms/step - loss: 1.0051e-04 - val_loss: 4.5549e-04
Epoch 83/100
63/63 [==============================] - 1s 10ms/step - loss: 9.6763e-05 - val_loss: 4.4827e-04
Epoch 84/100
63/63 [==============================] - 1s 11ms/step - loss: 1.3502e-04 - val_loss: 4.7234e-04
Epoch 85/100
63/63 [==============================] - 1s 10ms/step - loss: 1.1210e-04 - val_loss: 5.1695e-04
Epoch 86/100
63/63 [==============================] - 1s 9ms/step - loss: 9.7829e-05 - val_loss: 4.7815e-04
Epoch 87/100
63/63 [==============================] - 1s 10ms/step - loss: 1.3831e-04 - val_loss: 4.5064e-04
Epoch 88/100
63/63 [==============================] - 1s 10ms/step - loss: 1.1406e-04 - val_loss: 4.5505e-04
Epoch 89/100
63/63 [==============================] - 1s 10ms/step - loss: 1.1579e-04 - val_loss: 4.4977e-04
Epoch 90/100
63/63 [==============================] - 1s 10ms/step - loss: 1.1020e-04 - val_loss: 4.9598e-04
Epoch 91/100
63/63 [==============================] - 1s 10ms/step - loss: 9.8175e-05 - val_loss: 4.4895e-04
Epoch 92/100
63/63 [==============================] - 1s 10ms/step - loss: 1.0228e-04 - val_loss: 4.4821e-04
Epoch 93/100
63/63 [==============================] - 1s 10ms/step - loss: 9.0933e-05 - val_loss: 4.4297e-04
Epoch 94/100
63/63 [==============================] - 1s 10ms/step - loss: 1.2380e-04 - val_loss: 4.5330e-04
Epoch 95/100
63/63 [==============================] - 1s 10ms/step - loss: 9.8996e-05 - val_loss: 4.5713e-04
Epoch 96/100
63/63 [==============================] - 1s 10ms/step - loss: 1.2540e-04 - val_loss: 4.7242e-04
Epoch 97/100
63/63 [==============================] - 1s 10ms/step - loss: 9.6453e-05 - val_loss: 4.5895e-04
Epoch 98/100
63/63 [==============================] - 1s 10ms/step - loss: 9.5070e-05 - val_loss: 4.5552e-04
Epoch 99/100
63/63 [==============================] - 1s 10ms/step - loss: 9.6733e-05 - val_loss: 5.0722e-04
Epoch 100/100
63/63 [==============================] - 1s 10ms/step - loss: 9.6688e-05 - val_loss: 4.4097e-04
79/79 [==============================] - 0s 3ms/step - loss: 1.5385e-04
20/20 [==============================] - 0s 4ms/step - loss: 3.2173e-04
Train Loss: 0.00015384596190415323
Test Loss: 0.000321727100526914
79/79 [==============================] - 1s 3ms/step
20/20 [==============================] - 0s 2ms/step
In [ ]:
pred_oil_test_RNN_revamp_1 = pd.DataFrame(inverse_minmax_scaling(pred_oil_test_RNN_revamp_1, min(daily_oil['Price']), max(daily_oil['Price'])))
pred_oil_train_RNN_revamp_1 = pd.DataFrame(inverse_minmax_scaling(pred_oil_train_RNN_revamp_1, min(daily_oil['Price']), max(daily_oil['Price'])))

pred_oil_train_RNN_revamp_1.set_index(y_train_oil_true.index, inplace = True)
pred_oil_test_RNN_revamp_1.set_index(y_test_oil_true.index, inplace = True)
In [ ]:
# Plotting the actual vs. predicted oil prices for training
plt.figure(figsize=(10, 6))
plt.plot(y_train_oil_true, label='Actual Price', color='blue')
plt.plot(pred_oil_train_RNN_revamp_1, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Oil Prices (Training Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_train_oil_true, pred_oil_train_RNN_revamp_1))
No description has been provided for this image
Root Mean Squared Error (RMSE): 4.00420483914446
In [ ]:
# Plotting the actual vs. predicted oil prices for testing
plt.figure(figsize=(10, 6))
plt.plot(y_test_oil_true, label='Actual Price', color='blue')
plt.plot(pred_oil_test_RNN_revamp_1, label='Predicted Price', color='red')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Actual vs. Predicted Oil Prices (Testing Data)')
plt.legend()
plt.show()

print("Root Mean Squared Error (RMSE):", mean_squared_error(y_test_oil_true, pred_oil_test_RNN_revamp_1))
No description has been provided for this image
Root Mean Squared Error (RMSE): 8.373712003541904
In [ ]:
 
In [ ]:
 
In [ ]:
# Gold RNN Weights and Biases
weights_and_biases_RNN_gold = model_RNN_Gold_mini.get_weights()

for layer in model_RNN_Gold_mini.layers:
    print(f"Layer {layer.name}:")
    weights = layer.get_weights()
    for i, weight in enumerate(weights):
        print(f"  Weight {i}: {weight}")
Layer simple_rnn_4:
  Weight 0: [[ 0.57241434  0.5707887   0.43635768]
 [-0.11927641 -0.6055566   1.1102309 ]
 [ 0.775113    0.39019138 -0.42869887]
 [ 0.5844466   0.66043407 -0.24105771]]
  Weight 1: [[ 0.51458436 -0.68914145 -0.24063341]
 [-0.19654849  0.34755373 -0.7601009 ]
 [-0.9508444  -0.6162934  -0.21138677]]
  Weight 2: [-0.19270697  0.09682255  0.04734078]
Layer simple_rnn_5:
  Weight 0: [[-0.24830925  0.06137959]
 [-0.9686768   0.8348581 ]
 [-0.24330258 -1.0618478 ]]
  Weight 1: [[ 0.6533439   0.75706124]
 [-0.75706124  0.5992652 ]]
  Weight 2: [ 0.         -0.04976025]
Layer dense_1:
  Weight 0: [[ 0.61141026]
 [-0.4575549 ]]
  Weight 1: [0.48927027]
In [ ]:
# Oil RNN Weights and Biases
weights_and_biases_RNN_oil = model_RNN_Oil_mini.get_weights()

for layer in model_RNN_Oil_mini.layers:
    print(f"Layer {layer.name}:")
    weights = layer.get_weights()
    for i, weight in enumerate(weights):
        print(f"  Weight {i}: {weight}")
Layer simple_rnn_10:
  Weight 0: [[-0.44388124  0.797928    0.55916363]
 [ 0.07577112 -0.5724575  -0.6276217 ]
 [ 0.970923   -0.7775824  -0.1334143 ]
 [-0.19681369 -0.35563636  0.31823176]]
  Weight 1: [[ 0.04587556 -0.8475645   0.30972102]
 [ 0.85621685 -0.4172926  -0.30456463]
 [-0.45318776 -0.32787365 -0.8317936 ]]
  Weight 2: [-0.03888585  0.          0.01021454]
Layer simple_rnn_11:
  Weight 0: [[-0.20053431 -0.47840807]
 [-0.90590614  0.69363236]
 [ 0.23940547  0.89129406]]
  Weight 1: [[ 0.8299425   0.5129191 ]
 [-0.48222804  0.940427  ]]
  Weight 2: [0.01865044 0.00346284]
Layer dense_3:
  Weight 0: [[-1.1052591 ]
 [-0.53876746]]
  Weight 1: [0.849283]
In [ ]:
# Gold GRU Weights and Biases
weights_and_biases_GRU_gold = model_GRU_Gold_mini.get_weights()

for layer in model_GRU_Gold_mini.layers:
    print(f"Layer {layer.name}:")
    weights = layer.get_weights()
    for i, weight in enumerate(weights):
        print(f"  Weight {i}: {weight}")
Layer gru_4:
  Weight 0: [[-0.32477346 -0.40413052  0.36751914  0.10710541  0.06526057  0.3021118
  -0.35515276  0.22622228 -0.5601158 ]
 [ 0.03352977 -0.34246606 -0.36499214 -0.25188768  0.8154616   0.19349465
   0.729241    0.2893533  -0.6718902 ]
 [-0.37019712 -0.08269005 -0.08176886 -0.42671508 -0.34330747 -0.23504774
   0.39097336  0.18225326 -0.5312145 ]
 [-0.55394006  0.16001573 -0.5173019   1.0823594   1.011177    0.92373955
   0.0397414  -0.19241308  0.25637484]]
  Weight 1: [[ 0.324374    0.6846396   0.02301573  0.2426125   0.09973107  0.02623721
   0.65704197  0.4476206   0.259306  ]
 [-0.03921367  0.6603136   0.23195574  0.2785154  -0.01879892  0.31123507
  -0.11405078 -0.67928416  0.7383428 ]
 [-0.14929946 -0.13239038  0.28916556  0.40939233  0.35456544 -0.13819066
  -0.01292812 -0.16517815 -0.9140417 ]]
  Weight 2: [[-0.23073871  0.33744287 -0.11239156 -0.00964386  0.01922943  0.31778455
   0.00131967 -0.02554208 -0.02890994]
 [-0.23073871  0.33744287 -0.11239156 -0.00964386  0.01922943  0.31778455
   0.04720319  0.03504283 -0.05447516]]
Layer gru_5:
  Weight 0: [[-0.11565776 -0.16771916 -0.05069848 -0.10107652  0.2031672   0.38842547]
 [ 0.3314597  -0.41802305 -0.95755875 -0.21280599 -0.1284951   0.51880085]
 [-0.7456997  -0.53482807 -0.04970732  0.29547977 -0.6176726   0.16490354]]
  Weight 1: [[ 0.66113037 -0.62095624 -0.23689945  0.41017267  0.08666687 -0.05202212]
 [ 0.12754706 -0.20501631  0.24104188 -0.40521264  0.08408531 -0.65792567]]
  Weight 2: [[-0.14906427 -0.19360389 -0.00987003  0.08671819  0.00259631 -0.01431703]
 [-0.14906427 -0.19360389 -0.00987003  0.08671819  0.00540897 -0.01361621]]
Layer dense_5:
  Weight 0: [[1.4133223]
 [1.3623364]]
  Weight 1: [-0.01491678]
In [ ]:
# Oil GRU Weights and Biases
weights_and_biases_GRU_oil = model_GRU_Oil_mini.get_weights()

for layer in model_GRU_Oil_mini.layers:
    print(f"Layer {layer.name}:")
    weights = layer.get_weights()
    for i, weight in enumerate(weights):
        print(f"  Weight {i}: {weight}")
Layer gru_10:
  Weight 0: [[ 0.2843059  -0.70099497  0.0278506   0.11558157 -0.39333048 -0.08683071
  -0.5072268   0.00912096 -0.20802632]
 [-0.14501585 -0.25723982  0.41343048  0.77125365 -0.25990963 -0.6300324
   0.11607079 -0.02993173 -0.65260774]
 [-0.64616686 -0.00078405 -0.38888732 -0.08734801 -0.16028443  0.17340533
   0.05204669  0.6647571   0.12919545]
 [-0.59478176 -0.2204494  -0.7773499   0.6971826  -0.51153857  0.20902067
   0.36419463  0.21201429  0.52868915]]
  Weight 1: [[-0.09820024 -0.9308781  -0.50186664  0.14015302  0.5618587  -0.44450495
   0.19461459 -0.10641275  0.04683765]
 [ 0.23076029 -0.00647275  0.47827798 -0.22411902  0.18622313 -0.05289062
   0.34686324 -0.02222954  0.30646056]
 [ 0.3138994  -0.3883446  -0.41844806 -0.5020175  -0.23221497  0.05931713
  -0.18809123 -0.86110765 -0.12974006]]
  Weight 2: [[-0.05317213 -0.08883698 -0.12487464  0.0723496   0.02829241  0.0712353
   0.06458326  0.05765288  0.0318803 ]
 [-0.05317213 -0.08883698 -0.12487464  0.0723496   0.02829241  0.0712353
   0.07381168  0.04374463  0.02870562]]
Layer gru_11:
  Weight 0: [[ 0.6032485  -0.59749544  0.44323188 -0.39649886  0.76635695 -0.40313235]
 [-0.47216797 -0.18847623 -0.3772577   0.6588827   0.74402267 -0.04663581]
 [-0.7453564   0.9361454   0.06277055 -0.02009272 -0.5426504   0.3022325 ]]
  Weight 1: [[-0.2464485  -0.19767284  0.81718236 -0.3215112  -0.01568088  0.42252094]
 [-0.28730676  0.5244868   0.55549586  0.705378    0.02653662 -0.1762673 ]]
  Weight 2: [[-0.11340887 -0.01733059 -0.01048756  0.09168102  0.0511051   0.06245126]
 [-0.11340887 -0.01733059 -0.01048756  0.09168102  0.0502771   0.06628101]]
Layer dense_7:
  Weight 0: [[0.85767144]
 [0.1341285 ]]
  Weight 1: [0.04966886]
In [ ]:
# Gold LSTM Weights and Biases
weights_and_biases_LSTM_gold = model_LSTM_Gold_mini.get_weights()

for layer in model_LSTM_Gold_mini.layers:
    print(f"Layer {layer.name}:")
    weights = layer.get_weights()
    for i, weight in enumerate(weights):
        print(f"  Weight {i}: {weight}")
Layer lstm_4:
  Weight 0: [[ 2.70797074e-01  1.73846051e-01 -1.29530020e-02 -2.46497959e-01
  -1.03860795e-01  2.29712114e-01 -5.21709502e-01  6.32586479e-01
  -2.42238849e-01 -2.99254537e-01  3.17497551e-01  4.97741916e-04]
 [-2.70390451e-01  6.85513467e-02 -3.51113200e-01  1.91791117e-01
   1.12768255e-01 -4.21010971e-01 -4.98422354e-01  3.46968412e-01
   4.65043157e-01  4.26223814e-01 -2.56958514e-01 -3.49699616e-01]
 [ 3.77861321e-01 -2.86967427e-01 -2.28428897e-02  1.73557937e-01
   6.68021619e-01  5.62801063e-02 -2.58359522e-01  1.28825754e-01
   5.91968656e-01  5.43000996e-01  3.57052177e-01  4.61496115e-01]
 [-4.35774207e-01  3.95568192e-01  2.18827933e-01  4.21524346e-01
   6.35220230e-01  3.22974533e-01 -2.37995028e-01  2.21945181e-01
   3.47122431e-01 -1.13168269e-01  7.23264456e-01 -6.75948143e-01]]
  Weight 1: [[ 0.44023657  0.03390197 -0.28050017 -0.0233858   0.11330712  0.19618148
   0.636252    0.19508675  0.1792378   0.14002313  0.19259532  0.3778737 ]
 [ 0.2598671   0.11786565 -0.27141336 -0.02185344  0.00801632 -0.46618113
   0.10415009  0.21063912 -0.37528133 -0.5453682  -0.06789832  0.02761425]
 [ 0.17223695  0.04077993  0.41567323  0.4714593  -0.04261009  0.14187077
  -0.18420291  0.31081548 -0.35950622  0.15510248 -0.06544561  0.48735407]]
  Weight 2: [ 0.          0.08044815  0.04122639  1.          1.0220306   1.0820919
  0.         -0.02466295  0.11481936  0.          0.04513917  0.02389064]
Layer lstm_5:
  Weight 0: [[ 0.4572832  -0.18861866 -0.3252919   0.03179723 -0.64514303  0.41179854
   0.6746679   0.6331323 ]
 [ 0.31338423 -0.560439   -0.5474561  -0.4492907   0.80409044  0.07923812
  -0.21890493 -0.04261496]
 [ 0.63331854  0.5105966   0.48723927  0.41588467 -0.41540423 -0.08562306
   0.7205053   0.2238506 ]]
  Weight 1: [[ 0.02670211  0.5362595  -0.3068165   0.1457059  -0.30290154  0.52188957
  -0.09000396  0.56230974]
 [-0.21811338 -0.6605688  -0.41200447  0.30687407  0.0508289  -0.10412534
   0.26365662  0.21022628]]
  Weight 2: [ 0.1046442   0.0726632   1.0658643   0.9828685  -0.02349006 -0.00180149
  0.12404977  0.05289884]
Layer dense_9:
  Weight 0: [[1.0973259 ]
 [0.60174924]]
  Weight 1: [-0.00736187]
In [ ]:
# Oil LSTM Weights and Biases
weights_and_biases_LSTM_oil = model_LSTM_Oil_mini.get_weights()

for layer in model_LSTM_Oil_mini.layers:
    print(f"Layer {layer.name}:")
    weights = layer.get_weights()
    for i, weight in enumerate(weights):
        print(f"  Weight {i}: {weight}")
Layer lstm_10:
  Weight 0: [[ 0.19976872  0.01330638 -0.505928    0.5983266   0.14434779  0.53312093
  -0.4608519  -0.33109283 -0.51984215  0.19726878 -0.5011247  -0.6085017 ]
 [ 0.45647496 -0.26462018 -0.13783398  0.14231169 -0.5796116  -0.3277935
  -0.3828839   0.5123976  -0.36811632 -0.58514607  0.15366966  0.5462163 ]
 [-0.15015936 -0.06798798  0.07845902 -0.4349607   0.15290684  0.59278005
   0.45361787 -0.12471855 -0.35955456 -0.13269591 -0.469241   -0.49794316]
 [ 0.48384386 -0.49021915  0.5236936  -0.22173649  0.5014525  -0.17770886
   0.37251562 -0.02932781  0.21889865 -0.08082598 -0.35540813  0.0676744 ]]
  Weight 1: [[-0.27759254 -0.15971579  0.24422455 -0.09481209  0.3132998   0.02958697
   0.16130929  0.50637907  0.23350123 -0.4264917  -0.23708259  0.39331883]
 [-0.12994993 -0.11302979  0.28815824  0.10820347 -0.48507935 -0.28412724
  -0.24678406 -0.37620306  0.1886219  -0.5080086  -0.23147753 -0.09949853]
 [-0.29057086 -0.40727532 -0.00324348  0.07004802  0.3540488  -0.20689297
  -0.16490307 -0.4677913   0.25381613  0.32918546  0.1685435   0.35973412]]
  Weight 2: [0. 0. 0. 1. 1. 1. 0. 0. 0. 0. 0. 0.]
Layer lstm_11:
  Weight 0: [[-0.502594    0.36014384 -0.07107162  0.5935872  -0.14272207 -0.52439916
  -0.642806    0.48416048]
 [ 0.10920948 -0.49003732  0.5631524   0.5531737  -0.10714716 -0.26390874
   0.38494736 -0.336784  ]
 [ 0.06076068  0.4261152  -0.47081846 -0.46484306 -0.07692522  0.62308675
   0.2370683  -0.13691992]]
  Weight 1: [[ 0.52144253 -0.57909375  0.0523731   0.13071275 -0.47006357  0.1939402
  -0.1636238  -0.29592925]
 [-0.11432584  0.41927555 -0.14583276 -0.27708125 -0.5050728   0.10713498
   0.41250056 -0.52570266]]
  Weight 2: [0. 0. 1. 1. 0. 0. 0. 0.]
Layer dense_11:
  Weight 0: [[-0.40463936]
 [-0.2982291 ]]
  Weight 1: [0.6582559]
In [ ]:
X_gold[0]
Out[ ]:
array([[0.19164993, 0.18036693, 0.19366639, 0.59259259],
       [0.19636903, 0.1805531 , 0.1918128 , 0.48053181],
       [0.18854937, 0.17125411, 0.18461418, 0.51851852],
       [0.18502954, 0.1711331 , 0.1866345 , 0.55080722],
       [0.18604356, 0.18046002, 0.18810561, 0.6305793 ]])
In [ ]:
y_gold[0][0]
/tmp/ipykernel_85/1061818878.py:1: FutureWarning: Series.__getitem__ treating keys as positions is deprecated. In a future version, integer keys will always be treated as labels (consistent with DataFrame behavior). To access a value by position, use `ser.iloc[pos]`
  y_gold[0][0]
Out[ ]:
0.1976181651483524
In [ ]:
# Example input
x0 = np.array([[0.19636903, 0.1805531 , 0.1918128 , 0.48053181],
            [0.18854937, 0.17125411, 0.18461418, 0.51851852],
            [0.18502954, 0.1711331 , 0.1866345 , 0.55080722],
            [0.18604356, 0.18046002, 0.18810561, 0.6305793 ],
            [0.19723679, 0.18578436, 0.20112   , 0.54795821]])

# Weights and biases for simple_rnn_4
W_input_0 = np.array([[ 0.57241434,  0.5707887 ,  0.43635768],
                      [-0.11927641, -0.6055566 ,  1.1102309 ],
                      [ 0.775113  ,  0.39019138, -0.42869887],
                      [ 0.5844466 ,  0.66043407, -0.24105771]])

W_recurrent_0 = np.array([[ 0.51458436, -0.68914145, -0.24063341],
                          [-0.19654849,  0.34755373, -0.7601009 ],
                          [-0.9508444 , -0.6162934 , -0.21138677]])

bias_0 = np.array([-0.19270697,  0.09682255,  0.04734078])

# Weights and biases for simple_rnn_5
W_input_1 = np.array([[-0.24830925,  0.06137959],
                      [-0.9686768 ,  0.8348581 ],
                      [-0.24330258, -1.0618478 ]])

W_recurrent_1 = np.array([[ 0.6533439 ,  0.75706124],
                          [-0.75706124,  0.5992652 ]])

bias_1 = np.array([ 0.        , -0.04976025])

# Activation function (ReLU)
def relu(x):
    return np.maximum(0, x)

# Initial hidden state for simple_rnn_4
h_t = np.zeros((3,))

# Process each time step for the first SimpleRNN layer
input_to_rnn_5 = []

for t in range(x0.shape[0]):
    x_t = x0[t]
    h_t = relu(np.dot(x_t, W_input_0) + np.dot(h_t, W_recurrent_0) + bias_0)
    input_to_rnn_5.append(h_t)

input_to_rnn_5 = np.array(input_to_rnn_5)

# Initial hidden state for the second SimpleRNN layer
h_t1 = np.zeros((2,))

# Process each time step for the second SimpleRNN layer
for t in range(input_to_rnn_5.shape[0]):
    x_t1 = input_to_rnn_5[t]
    h_t1 = relu(np.dot(x_t1, W_input_1) + np.dot(h_t1, W_recurrent_1) + bias_1)
    print(f"Hidden state at time {t} for simple_rnn_5: {h_t1}")

# Final hidden state for the dense layer input
final_hidden_state = h_t1

# Weights and biases for dense_1 layer
W_dense = np.array([[ 0.61141026], [-0.4575549]])
bias_dense = np.array([0.48927027])

# Compute the dense layer output
dense_output = np.dot(final_hidden_state, W_dense) + bias_dense
print(f"Dense layer output: {dense_output}")
Hidden state at time 0 for simple_rnn_5: [0.         0.23712295]
Hidden state at time 1 for simple_rnn_5: [0.         0.42440739]
Hidden state at time 2 for simple_rnn_5: [0.         0.62391564]
Hidden state at time 3 for simple_rnn_5: [0.         0.73252627]
Hidden state at time 4 for simple_rnn_5: [0.         0.69425885]
Dense layer output: [0.17160873]
In [ ]:
model_RNN_Gold_mini.predict(X_train_gold)
65/65 [==============================] - 0s 2ms/step
Out[ ]:
array([[0.1726866 ],
       [0.17160875],
       [0.21609509],
       ...,
       [0.48927027],
       [0.48927027],
       [0.48927027]], dtype=float32)
In [ ]:
import numpy as np

# Input sequence (x0)
x0 = np.array([[0.19164993, 0.18036693, 0.19366639, 0.59259259],
               [0.19636903, 0.1805531 , 0.1918128 , 0.48053181],
               [0.18854937, 0.17125411, 0.18461418, 0.51851852],
               [0.18502954, 0.1711331 , 0.1866345 , 0.55080722],
               [0.18604356, 0.18046002, 0.18810561, 0.6305793 ]])

# Extracted weights and biases for gru_4
W_z_4, W_r_4, W_h_4 = np.split(np.array([[-0.32477346, -0.40413052,  0.36751914,  0.10710541,  0.06526057,  0.3021118,
                                    -0.35515276,  0.22622228, -0.5601158],
                                    [0.03352977, -0.34246606, -0.36499214, -0.25188768,  0.8154616,  0.19349465,
                                     0.729241,  0.2893533, -0.6718902],
                                    [-0.37019712, -0.08269005, -0.08176886, -0.42671508, -0.34330747, -0.23504774,
                                     0.39097336,  0.18225326, -0.5312145],
                                    [-0.55394006, 0.16001573, -0.5173019, 1.0823594, 1.011177, 0.92373955,
                                     0.0397414, -0.19241308, 0.25637484]]), 3, axis=1)

U_z_4, U_r_4, U_h_4 = np.split(np.array([[0.324374, 0.6846396, 0.02301573, 0.2426125, 0.09973107, 0.02623721,
                                    0.65704197, 0.4476206, 0.259306],
                                   [-0.03921367, 0.6603136, 0.23195574, 0.2785154, -0.01879892, 0.31123507,
                                    -0.11405078, -0.67928416, 0.7383428],
                                   [-0.14929946, -0.13239038, 0.28916556, 0.40939233, 0.35456544, -0.13819066,
                                    -0.01292812, -0.16517815, -0.9140417]]), 3, axis=1)

b_z_4, b_r_4, b_h_4 = np.split(np.array([[-0.23073871, 0.33744287, -0.11239156, -0.00964386, 0.01922943, 0.31778455,
                                    0.00131967, -0.02554208, -0.02890994],
                                   [-0.23073871, 0.33744287, -0.11239156, -0.00964386, 0.01922943, 0.31778455,
                                    0.04720319, 0.03504283, -0.05447516]]), 3, axis=1)

# Extracted weights and biases for gru_5
W_z_5, W_r_5, W_h_5 = np.split(np.array([[-0.11565776, -0.16771916, -0.05069848, -0.10107652, 0.2031672, 0.38842547],
                                        [0.3314597, -0.41802305, -0.95755875, -0.21280599, -0.1284951, 0.51880085],
                                        [-0.7456997, -0.53482807, -0.04970732, 0.29547977, -0.6176726, 0.16490354]]), 3, axis=1)

U_z_5, U_r_5, U_h_5 = np.split(np.array([[0.66113037, -0.62095624, -0.23689945, 0.41017267, 0.08666687, -0.05202212],
                                         [0.12754706, -0.20501631, 0.24104188, -0.40521264, 0.08408531, -0.65792567]]), 3, axis=1)

b_z_5, b_r_5, b_h_5 = np.split(np.array([[-0.14906427, -0.19360389, -0.00987003, 0.08671819, 0.00259631, -0.01431703],
                                        [-0.14906427, -0.19360389, -0.00987003, 0.08671819, 0.00540897, -0.01361621]]), 3, axis=1)

# Extracted weights and biases for dense_5
W_dense = np.array([[1.4133223], [1.3623364]])
b_dense = np.array([-0.01491678])

# GRU cell feedforward function
def gru_cell_forward(x, h_prev, W_z, W_r, W_h, U_z, U_r, U_h, b_z, b_r, b_h):
    # Update gate
    z = sigmoid(np.dot(x, W_z) + np.dot(h_prev, U_z) + b_z)
    
    # Reset gate
    r = sigmoid(np.dot(x, W_r) + np.dot(h_prev, U_r) + b_r)
    
    # Candidate hidden state
    h_tilde = np.tanh(np.dot(x, W_h) + np.dot(r * h_prev, U_h) + b_h)
    
    # Current hidden state
    h = z * h_prev + (1 - z) * h_tilde
    
    return h

# Activation functions
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Initial hidden state for gru_4
h_prev_4 = np.zeros((1, 3))  # Size of the hidden state for gru_4

# Process each time step for gru_4
for t in range(x0.shape[0]):
    x_t = x0[t].reshape(1, -1)
    h_prev_4 = gru_cell_forward(x_t, h_prev_4, W_z_4, W_r_4, W_h_4, U_z_4, U_r_4, U_h_4, b_z_4, b_r_4, b_h_4)

# The output of gru_4 becomes the input to gru_5
gru_5_input = h_prev_4

# Initial hidden state for gru_5
h_prev_5 = np.zeros((1, 2))  # Size of the hidden state for gru_5

# Process the input for gru_5 (no sequence, just the final output of gru_4)
h_prev_5 = gru_cell_forward(gru_5_input, h_prev_5, W_z_5, W_r_5, W_h_5, U_z_5, U_r_5, U_h_5, b_z_5, b_r_5, b_h_5)

# The output of gru_5 goes into the dense layer
dense_input = h_prev_5

# Forward pass through the dense layer
output = np.dot(dense_input, W_dense) + b_dense

print("Final output of the model:", output)
Final output of the model: [[0.12042789]
 [0.16383203]]
In [ ]:
 
In [ ]:
x0[1]
Out[ ]:
array([0.19636903, 0.1805531 , 0.1918128 , 0.48053181])
In [ ]:
x0[1].reshape(1, -1)
Out[ ]:
array([[0.19636903, 0.1805531 , 0.1918128 , 0.48053181]])
In [ ]:
import numpy as np

# Input example x0
x0 = np.array([[0.19164993, 0.18036693, 0.19366639, 0.59259259],
               [0.19636903, 0.1805531, 0.1918128, 0.48053181],
               [0.18854937, 0.17125411, 0.18461418, 0.51851852],
               [0.18502954, 0.1711331, 0.1866345, 0.55080722],
               [0.18604356, 0.18046002, 0.18810561, 0.6305793]])

# Weights and biases for the first GRU layer (gru_4)
W1 = np.array([[-0.32477346, -0.40413052,  0.36751914,  0.10710541,  0.06526057,  0.3021118, -0.35515276,  0.22622228, -0.5601158 ],
               [ 0.03352977, -0.34246606, -0.36499214, -0.25188768,  0.8154616,  0.19349465, 0.729241,  0.2893533, -0.6718902 ],
               [-0.37019712, -0.08269005, -0.08176886, -0.42671508, -0.34330747, -0.23504774, 0.39097336,  0.18225326, -0.5312145 ],
               [-0.55394006,  0.16001573, -0.5173019,  1.0823594,  1.011177,  0.92373955, 0.0397414, -0.19241308,  0.25637484]])
U1 = np.array([[ 0.324374,  0.6846396,  0.02301573,  0.2426125,  0.09973107,  0.02623721, 0.65704197,  0.4476206,  0.259306  ],
               [-0.03921367,  0.6603136,  0.23195574,  0.2785154, -0.01879892,  0.31123507, -0.11405078, -0.67928416,  0.7383428 ],
               [-0.14929946, -0.13239038,  0.28916556,  0.40939233,  0.35456544, -0.13819066, -0.01292812, -0.16517815, -0.9140417 ]])
b1 = np.array([[-0.23073871,  0.33744287, -0.11239156, -0.00964386,  0.01922943,  0.31778455,  0.00131967, -0.02554208, -0.02890994],
               [-0.23073871,  0.33744287, -0.11239156, -0.00964386,  0.01922943,  0.31778455,  0.04720319,  0.03504283, -0.05447516]])

# Weights and biases for the second GRU layer (gru_5)
W2 = np.array([[-0.11565776, -0.16771916, -0.05069848, -0.10107652,  0.2031672,  0.38842547],
               [ 0.3314597, -0.41802305, -0.95755875, -0.21280599, -0.1284951,  0.51880085],
               [-0.7456997, -0.53482807, -0.04970732,  0.29547977, -0.6176726,  0.16490354]])
U2 = np.array([[ 0.66113037, -0.62095624, -0.23689945,  0.41017267,  0.08666687, -0.05202212],
               [ 0.12754706, -0.20501631,  0.24104188, -0.40521264,  0.08408531, -0.65792567]])
b2 = np.array([[-0.14906427, -0.19360389, -0.00987003,  0.08671819,  0.00259631, -0.01431703],
               [-0.14906427, -0.19360389, -0.00987003,  0.08671819,  0.00540897, -0.01361621]])

# Weights and biases for the dense layer (dense_5)
W_dense = np.array([[1.4133223], [1.3623364]])
b_dense = np.array([-0.01491678])

# Function for sigmoid activation
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Function for tanh activation
def tanh(x):
    return np.tanh(x)

# Initial hidden state for first GRU layer
h_prev1 = np.zeros(W1.shape[1] // 3)

# Process the input through the first GRU layer
h1_output = []
for t in range(x0.shape[0]):
    x_t = x0[t]

    # Update gate
    z_t = sigmoid(np.dot(x_t, W1[:, :3]) + np.dot(h_prev1, U1[:, :3]) + b1[:, :3])
    # Reset gate
    r_t = sigmoid(np.dot(x_t, W1[:, 3:6]) + np.dot(h_prev1, U1[:, 3:6]) + b1[:, 3:6])
    # Candidate hidden state
    h_tilde_t = tanh(np.dot(x_t, W1[:, 6:9]) + np.dot(r_t * h_prev1, U1[:, 6:9]) + b1[:, 6:9])
    # Final hidden state
    h_t = (1 - z_t) * h_prev1 + z_t * h_tilde_t
    
    h_prev1 = h_t
    h1_output.append(h_t)

h1_output = np.array(h1_output)

# Initial hidden state for second GRU layer
h_prev2 = np.zeros(W2.shape[1] // 3)

# Process the output of the first GRU layer through the second GRU layer
for t in range(h1_output.shape[0]):
    h1_t = h1_output[t]

    # Update gate
    z_t2 = sigmoid(np.dot(h1_t, W2[:, :2]) + np.dot(h_prev2, U2[:, :2]) + b2[:, :2])
    # Reset gate
    r_t2 = sigmoid(np.dot(h1_t, W2[:, 2:4]) + np.dot(h_prev2, U2[:, 2:4]) + b2[:, 2:4])
    # Candidate hidden state
    h_tilde_t2 = tanh(np.dot(h1_t, W2[:, 4:6]) + np.dot(r_t2 * h_prev2, U2[:, 4:6]) + b2[:, 4:6])
    # Final hidden state
    h_t2 = (1 - z_t2) * h_prev2 + z_t2 * h_tilde_t2
    
    h_prev2 = h_t2

# Process the output of the second GRU layer through the dense layer
output = np.dot(h_prev2, W_dense) + b_dense

print("Final output:", output)
Final output: [[0.16508377]
 [0.21626961]]
In [ ]:
X_gold[0]
Out[ ]:
array([[0.19164993, 0.18036693, 0.19366639, 0.59259259],
       [0.19636903, 0.1805531 , 0.1918128 , 0.48053181],
       [0.18854937, 0.17125411, 0.18461418, 0.51851852],
       [0.18502954, 0.1711331 , 0.1866345 , 0.55080722],
       [0.18604356, 0.18046002, 0.18810561, 0.6305793 ]])